diff --git "a/6477.jsonl" "b/6477.jsonl"
new file mode 100644
--- /dev/null
+++ "b/6477.jsonl"
@@ -0,0 +1,275 @@
+{"seq_id":"37467275140","text":"from matplotlib import pyplot as plt\nimport numpy as np\nfrom spikingjelly.activation_based.examples.conv_fashion_mnist import Net\nfrom spikingjelly import visualizing\nimport torch\nimport torch.nn as nn\nimport torchvision\ndef plot_log(csv_file, title, x_label, y_label, figsize=(12, 8), plot_max=False):\n log_data = np.loadtxt(csv_file, delimiter=',', skiprows=1, usecols=(1, 2))\n x = log_data[:, 0]\n y = log_data[:, 1]\n fig = plt.figure(figsize=figsize)\n plt.plot(x, y)\n plt.xlabel(x_label, fontsize=20)\n plt.ylabel(y_label, fontsize=20)\n plt.title(title, fontsize=20)\n plt.grid(linestyle='-.')\n\n if plot_max:\n # mark the maximum value\n index = y.argmax()\n plt.text(x[index], y[index], '({}, {})'.format(int(x[index]), round(y[index], 3)), fontsize=14)\n plt.scatter(x[index], y[index], marker='1', alpha=0.8, linewidths=0.1, c='r')\n\n # plt.show()\nif __name__ == '__main__':\n plt.style.use(['science', 'muted'])\n plot_log('./docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/run-logs-tag-train_accuracy.csv', 'Accuracy on train batch',\n 'iteration', 'accuracy')\n plt.savefig('./docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/train.svg')\n plt.savefig('./docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/train.pdf')\n plt.savefig('./docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/train.png')\n plt.clf()\n plot_log('./docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/run-logs-tag-test_accuracy.csv', 'Accuracy on test dataset',\n 'epoch', 'accuracy', plot_max=True)\n plt.savefig('./docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/test.svg')\n plt.savefig('./docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/test.pdf')\n plt.savefig('./docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/test.png')\n exit()  # NOTE: everything below this early exit is currently skipped\n dataset_dir = input('输入保存Fashion MNIST数据集的位置,例如“./”\n input root directory for saving Fashion MNIST dataset, e.g., \"./\": ')\n\n log_dir = input('输入保存tensorboard日志文件的位置,例如“./”\n input root directory for saving tensorboard logs, e.g., \"./\": ')\n test_data_loader = torch.utils.data.DataLoader(\n dataset=torchvision.datasets.FashionMNIST(\n root=dataset_dir,\n train=False,\n transform=torchvision.transforms.ToTensor(),\n download=True),\n batch_size=1,\n shuffle=True,\n drop_last=False)\n net = torch.load('./net_max_acc.pt', 'cpu')\n encoder = nn.Sequential(\n net.static_conv,\n net.conv[0]\n )\n encoder.eval()\n\n with torch.no_grad():\n # evaluate on the test set once after each full pass over the dataset\n for img, label in test_data_loader:\n fig = plt.figure(dpi=200)\n plt.imshow(img.squeeze().numpy(), cmap='gray')\n plt.title('Input image', fontsize=20)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n out_spikes = 0\n for t in range(net.T):\n out_spikes += encoder(img).squeeze()\n if t == 0 or t == net.T - 1:\n out_spikes_c = out_spikes.clone()\n for i in range(out_spikes_c.shape[0]):\n if out_spikes_c[i].max().item() > out_spikes_c[i].min().item():\n out_spikes_c[i] = (out_spikes_c[i] - out_spikes_c[i].min()) / (out_spikes_c[i].max() - out_spikes_c[i].min())\n visualizing.plot_2d_spiking_feature_map(out_spikes_c, 8, 16, 1, None)\n plt.title('$\\\\sum_{t} S_{t}$ at $t = ' + str(t) + '$', fontsize=20)\n 
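# Reviewer sketch (not part of the original script): if the per-timestep feature maps\n# should also be kept on disk, the figure could be saved before it is shown; the\n# output path below is hypothetical.\n# plt.savefig('./feature_map_t{}.svg'.format(t))\n 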
plt.show()\n","repo_name":"fangwei123456/spikingjelly","sub_path":"docs/source/_static/tutorials/activation_based/4_conv_fashion_mnist/plot_logs.py","file_name":"plot_logs.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":944,"dataset":"github-code","pt":"16"} +{"seq_id":"32091215897","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport joblib\nimport os\n\n\ndef preprocess(dataset, x_iloc_list, y_iloc, testSize):\n # dataset = pd.read_csv(csv_file)\n X = dataset.iloc[:, x_iloc_list].values \n y = dataset.iloc[:, y_iloc].values \n\n # split into training and testing set\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = testSize, random_state = 0)\n\n # standardization of values\n from sklearn.preprocessing import StandardScaler\n sc = StandardScaler()\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\n return X_train, X_test, y_train, y_test\n \n \nclass classification:\n \n def __init__(self, X_train, X_test, y_train, y_test):\n self.X_train = X_train\n self.X_test = X_test\n self.y_train = y_train\n self.y_test = y_test\n \n \n # Contour Graph if no. of feature is 2\n def classification_view(self, X_train, y_train, classifier):\n from matplotlib.colors import ListedColormap\n X_set, y_set = X_train, y_train\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.figure(figsize=(16,8))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.4, cmap = ListedColormap(('#F5716C', '#39A861')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('#F5716C', '#39A861'))(i), label = j)\n plt.title('Visualization of how the classification is made: errors are identified where the color of the\\\n point and the background are different')\n plt.legend()\n plt.show()\n \n\n def accuracy(self, confusion_matrix):\n sum, total = 0,0\n for i in range(len(confusion_matrix)):\n for j in range(len(confusion_matrix[0])):\n if i == j: \n sum += confusion_matrix[i,j]\n total += confusion_matrix[i,j]\n return sum/total\n\n\n def classification_report_plot(self, clf_report, filename):\n folder = \"clf_plots\"\n if not os.path.isdir(folder):\n os.mkdir(folder)\n \n out_file_name = folder + \"/\" + filename + \".png\"\n \n fig=plt.figure(figsize=(16,10))\n sns.set(font_scale=4)\n sns.heatmap(pd.DataFrame(clf_report).iloc[:-1, :].T, annot=True, cmap=\"Greens\")\n fig.savefig(out_file_name, bbox_inches=\"tight\")\n \n \n def LR(self):\n from sklearn.linear_model import LogisticRegression\n lr_classifier = LogisticRegression()\n lr_classifier.fit(self.X_train, self.y_train)\n joblib.dump(lr_classifier, \"model/lr.sav\")\n y_pred = lr_classifier.predict(self.X_test)\n\n print(\"\\n\")\n print(\"### Logistic Regression Classifier ###\")\n print('Classification Report: ')\n print(classification_report(self.y_test, y_pred),'\\n')\n print('Confusion Matrix: ')\n print(confusion_matrix(self.y_test, y_pred),'\\n')\n print('Precision: ', self.accuracy(confusion_matrix(self.y_test, y_pred))*100,'%')\n\n 
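# Note (added comment): accuracy() sums the diagonal of the confusion matrix and\n # divides by the total count, i.e. it computes overall accuracy - so the value\n # printed above as 'Precision' is really the accuracy score.\n 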
self.classification_report_plot(classification_report(self.y_test, y_pred, \\\n output_dict=True), \"LR\")\n \n if len(self.X_train[0]) == 2:\n self.classification_view(self.X_train, self.y_train, lr_classifier)\n \n \n def KNN(self):\n from sklearn.neighbors import KNeighborsClassifier\n knn_classifier = KNeighborsClassifier()\n knn_classifier.fit(self.X_train, self.y_train)\n joblib.dump(knn_classifier, \"model/knn.sav\")\n y_pred = knn_classifier.predict(self.X_test)\n \n print(\"\\n\")\n print(\"### K-Neighbors Classifier ###\")\n print('Classification Report: ')\n print(classification_report(self.y_test, y_pred),'\\n')\n print('Confusion Matrix: ')\n print(confusion_matrix(self.y_test, y_pred),'\\n')\n print('Precision: ', self.accuracy(confusion_matrix(self.y_test, y_pred))*100,'%')\n\n self.classification_report_plot(classification_report(self.y_test, y_pred, \\\n output_dict=True), \"KNN\")\n \n if len(self.X_train[0]) == 2:\n self.classification_view(self.X_train, self.y_train, knn_classifier)\n \n \n # kernel type could be 'linear' or 'rbf' (Gaussian)\n def SVM(self, kernel_type):\n from sklearn.svm import SVC\n svm_classifier = SVC(kernel = kernel_type)\n svm_classifier.fit(self.X_train, self.y_train)\n joblib.dump(svm_classifier, \"model/svm.sav\")\n y_pred = svm_classifier.predict(self.X_test)\n \n print(\"\\n\")\n print(\"### Support Vector Classifier (\" + kernel_type + \") ###\")\n print('Classification Report: ')\n print(classification_report(self.y_test, y_pred),'\\n')\n print('Confusion Matrix: ')\n print(confusion_matrix(self.y_test, y_pred),'\\n')\n print('Precision: ', self.accuracy(confusion_matrix(self.y_test, y_pred))*100,'%')\n\n self.classification_report_plot(classification_report(self.y_test, y_pred, \\\n output_dict=True), \"SVC\"+kernel_type)\n \n if len(self.X_train[0]) == 2:\n self.classification_view(self.X_train, self.y_train, svm_classifier)\n \n \n def NB(self):\n from sklearn.naive_bayes import GaussianNB\n nb_classifier = GaussianNB()\n nb_classifier.fit(self.X_train, self.y_train)\n joblib.dump(nb_classifier, \"model/nb.sav\")\n y_pred = nb_classifier.predict(self.X_test)\n \n print(\"\\n\")\n print(\"### Naive Bayes Classifier ###\")\n print('Classification Report: ')\n print(classification_report(self.y_test, y_pred),'\\n')\n print('Confusion Matrix: ')\n print(confusion_matrix(self.y_test, y_pred),'\\n')\n print('Precision: ', self.accuracy(confusion_matrix(self.y_test, y_pred))*100,'%')\n\n self.classification_report_plot(classification_report(self.y_test, y_pred, \\\n output_dict=True), \"NB\")\n \n if len(self.X_train[0]) == 2:\n self.classification_view(self.X_train, self.y_train, nb_classifier)\n \n \n def DT(self):\n from sklearn.tree import DecisionTreeClassifier\n tree_classifier = DecisionTreeClassifier()\n tree_classifier.fit(self.X_train, self.y_train)\n joblib.dump(tree_classifier, \"model/tree.sav\")\n y_pred = tree_classifier.predict(self.X_test)\n \n print(\"\\n\")\n print(\"### Decision Tree Classifier ###\")\n print('Classification Report: ')\n print(classification_report(self.y_test, y_pred),'\\n')\n print('Confusion Matrix: ')\n print(confusion_matrix(self.y_test, y_pred),'\\n')\n print('Precision: ', self.accuracy(confusion_matrix(self.y_test, y_pred))*100,'%')\n\n self.classification_report_plot(classification_report(self.y_test, y_pred, \\\n output_dict=True), \"DT\")\n \n if len(self.X_train[0]) == 2:\n self.classification_view(self.X_train, self.y_train, tree_classifier)\n \n \n def RF(self):\n from sklearn.ensemble import 
RandomForestClassifier\n rf_classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy')\n rf_classifier.fit(self.X_train, self.y_train)\n joblib.dump(rf_classifier, \"model/rf.sav\")\n y_pred = rf_classifier.predict(self.X_test)\n \n print(\"\\n\")\n print(\"### Random Forest Classifier ###\")\n print('Classification Report: ')\n print(classification_report(self.y_test, y_pred),'\\n')\n print('Confusion Matrix: ')\n print(confusion_matrix(self.y_test, y_pred),'\\n')\n print('Precision: ', self.accuracy(confusion_matrix(self.y_test, y_pred))*100,'%')\n\n self.classification_report_plot(classification_report(self.y_test, y_pred, \\\n output_dict=True), \"RF\")\n \n if len(self.X_train[0]) == 2:\n self.classification_view(self.X_train, self.y_train, rf_classifier)\n \n \n","repo_name":"shantoroy/OS_fingerprinting_using-ML","sub_path":"ml_template.py","file_name":"ml_template.py","file_ext":"py","file_size_in_byte":8940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"8526608861","text":"\"\"\"\nPytest file for get_simulated_data.py\n\nIt tests :\n- The dimensions of the outputs\n- Whether they should be binary or not\n- Whether the effects are coherent\n- Whether forbidden inputs return an error\n- Whether aberrant behaviors happen (NaN, unexpected error...)\n\nReminder :\np_t = P(T=1|X)\nth_p_t_mx = P(T=1|X,M)\n\"\"\"\n\nfrom pprint import pprint\nimport itertools\nimport pytest\nimport numpy as np\nfrom numpy.random import default_rng\nfrom med_bench.src.get_simulated_data import simulate_data\n\n\nPARAMETER_NAME = [\n \"n\",\n \"rg\",\n \"mis_spec_m\",\n \"mis_spec_y\",\n \"dim_x\",\n \"dim_m\",\n \"seed\",\n \"type_m\",\n \"sigma_y\",\n \"sigma_m\",\n \"beta_t_factor\",\n \"beta_m_factor\",\n]\n\n\nPARAMETER_LIST = list(\n itertools.product(\n [1, 500, 1000],\n [default_rng(321)],\n [False, True],\n [False, True],\n [1, 5],\n [1],\n [123],\n [\"binary\", \"continuous\"],\n [0.5],\n [0.5],\n [0.5],\n [0.5],\n )\n)\n\n\n@pytest.fixture(params=PARAMETER_LIST)\ndef dict_param(request):\n return dict(zip(PARAMETER_NAME, request.param))\n\n\n@pytest.fixture\ndef data(dict_param):\n return simulate_data(**dict_param)\n\n\n@pytest.fixture\ndef x(data):\n return data[0]\n\n\n@pytest.fixture\ndef t(data):\n return data[1].ravel()\n\n\n@pytest.fixture\ndef m(data):\n return data[2]\n\n\n@pytest.fixture\ndef y(data):\n return data[3].ravel()\n\n\n@pytest.fixture\ndef effects(data):\n return np.array(data[4:9])\n\n\ndef test_dimension_x(x, dict_param):\n assert x.shape == (dict_param[\"n\"], dict_param[\"dim_x\"])\n\n\ndef test_dimension_t(t, dict_param):\n assert t.shape == (dict_param[\"n\"],)\n\n\ndef test_dimension_m(m, dict_param):\n assert m.shape == (dict_param[\"n\"], dict_param[\"dim_m\"])\n\n\ndef test_dimension_y(y, dict_param):\n assert y.shape == (dict_param[\"n\"],)\n\n\ndef test_m_is_binary(m, dict_param):\n if dict_param[\"type_m\"] == \"binary\":\n assert (\n sum(m.ravel() == 1) + sum(m.ravel() == 0)\n == dict_param[\"n\"] * dict_param[\"dim_m\"]\n )\n else:\n assert (\n sum(m.ravel() == 1) + sum(m.ravel() == 0)\n < dict_param[\"n\"] * dict_param[\"dim_m\"]\n )\n\n\ndef test_total_is_direct_plus_indirect(effects):\n # total = theta_1 + delta_0\n assert effects[0] == pytest.approx(effects[1] + effects[4])\n # total = theta_0 + delta_1\n assert effects[0] == pytest.approx(effects[2] + effects[3])\n\n\ndef test_effects_are_equals_if_y_well_specified(effects, dict_param):\n if dict_param[\"mis_spec_y\"]:\n 
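# With a misspecified outcome model the two decompositions of the total effect\n # differ, so the direct effects (effects[1] vs effects[2]) and the indirect\n # effects (effects[3] vs effects[4]) are expected to be unequal here.\n 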
assert effects[1] != pytest.approx(effects[2])\n assert effects[3] != pytest.approx(effects[4])\n else:\n assert effects[1] == pytest.approx(effects[2])\n assert effects[3] == pytest.approx(effects[4])\n\n\n# n=0 : Warnings\n@pytest.mark.xfail\ndef test_n_null_should_fail():\n with pytest.raises(ValueError):\n simulate_data(\n n=0,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=1,\n dim_m=1,\n seed=1,\n type_m=\"binary\",\n sigma_y=0.5,\n sigma_m=0.5,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n\n\n# n<0 : l19 ; ValueError: negative dimensions are not allowed\n@pytest.mark.xfail\ndef test_n_negative_should_fail():\n with pytest.raises(ValueError):\n simulate_data(\n n=-1,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=1,\n dim_m=1,\n seed=1,\n type_m=\"binary\",\n sigma_y=0.5,\n sigma_m=0.5,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n\n\n# dim_x=0 : No Warning\n@pytest.mark.xfail\ndef test_dim_x_null_should_fail():\n with pytest.raises(ValueError):\n simulate_data(\n n=10,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=0,\n dim_m=1,\n seed=1,\n type_m=\"binary\",\n sigma_y=0.5,\n sigma_m=0.5,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n\n\n# dim_m=0 ; l134 : ValueError\n@pytest.mark.xfail\ndef test_dim_m_null_should_fail():\n with pytest.raises(ValueError):\n simulate_data(\n n=10,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=1,\n dim_m=0,\n seed=1,\n type_m=\"binary\",\n sigma_y=0.5,\n sigma_m=0.5,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n\n\n# dim_x<0 : l115 ; ValueError: negative dimensions are not allowed\n@pytest.mark.xfail\ndef test_dim_x_negative_should_fail():\n with pytest.raises(ValueError):\n simulate_data(\n n=10,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=-1,\n dim_m=1,\n seed=1,\n type_m=\"binary\",\n sigma_y=0.5,\n sigma_m=0.5,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n\n\n# dim_m<0 : l123 ; ValueError: negative dimensions are not allowed\n@pytest.mark.xfail\ndef test_dim_m_negative_should_fail():\n with pytest.raises(ValueError):\n simulate_data(\n n=10,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=1,\n dim_m=-1,\n seed=1,\n type_m=\"binary\",\n sigma_y=0.5,\n sigma_m=0.5,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n\n\n# dim_m>1 ; n>1 ; \"binary\" ; l39\n@pytest.mark.xfail\ndef test_m_multidimensional_binary_works():\n try:\n simulate_data(\n n=7,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=1,\n dim_m=3,\n seed=1,\n type_m=\"binary\",\n sigma_y=0.5,\n sigma_m=0.5,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n except ValueError as err:\n pprint(err)\n assert False\n else:\n pass\n\n\n# dim_m>1 ; n=1 ; l58\n@pytest.mark.xfail\ndef test_m_multidimensional_binary_works1():\n try:\n simulate_data(\n n=1,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=1,\n dim_m=2,\n seed=1,\n type_m=\"binary\",\n sigma_y=0.5,\n sigma_m=0.5,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n except ValueError as err:\n pprint(err)\n assert False\n else:\n pass\n\n\n# sigma_m large ; \"continuous\" ; P(T=1|X,M) = NaN\n@pytest.mark.xfail\ndef test_huge_sigma_m_makes_nan():\n with pytest.raises(Warning):\n data_temp = simulate_data(\n n=1,\n rg=default_rng(42),\n mis_spec_m=True,\n mis_spec_y=False,\n dim_x=1,\n dim_m=1,\n seed=1,\n type_m=\"continuous\",\n sigma_y=0.5,\n sigma_m=5351,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n assert data_temp[10] != np.nan\n\n\n# sigma_m=0 ; \"continuous\" ; P(T=1|X,M) = 
NaN\n@pytest.mark.xfail\ndef test_null_sigma_m_makes_nan():\n with pytest.raises(Warning):\n data_temp = simulate_data(\n n=1,\n rg=default_rng(42),\n mis_spec_m=False,\n mis_spec_y=False,\n dim_x=1,\n dim_m=1,\n seed=1,\n type_m=\"continuous\",\n sigma_y=0.5,\n sigma_m=0,\n beta_t_factor=1,\n beta_m_factor=1,\n )\n assert data_temp[10] != np.nan\n","repo_name":"judithabk6/med_bench","sub_path":"tests/simulate_data/test_get_simulated_data.py","file_name":"test_get_simulated_data.py","file_ext":"py","file_size_in_byte":7768,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"13102382157","text":"\"\"\"\ncharmdoctools diff\n\nThis wraps some functions to diff different versions of docs.\n\nPrimarily, this is intended to check a local version against a \nversion stored elsewhere\n\"\"\"\nimport click\nimport validators\nimport os\nimport difflib\nfrom .metadata import get, strip, put\nfrom .discourse import get_md\n\ndef source_to_text(src) -> list:\n \"\"\"Check the supplied string to see whether it is a URL, a local file, etc., and return the appropriate text from that location\"\"\"\n text = list()\n if os.path.exists(src):\n # supplied arg is a filename\n with open(src) as f:\n text = f.readlines()\n elif validators.url(src):\n # supplied arg is a url\n # NOTE: get_md() is wrapped in str() here, so this branch returns a string\n # rather than the annotated list\n text = str(get_md(src))\n else:\n click.echo(\" Could not determine or find input source, please check the supplied arguments and try again.\")\n return text\n\ndef diff_docs(file1, file2, quiet):\n text1 = source_to_text(file1)\n text2 = source_to_text(file2)\n outlines = list(difflib.unified_diff(text1,text2,fromfile=file1,tofile=file2))\n return outlines\n\n","repo_name":"evilnick/charmdoctools","sub_path":"charmdoctools/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9126513645","text":"import argparse\nimport pandas as pd\n\nimport sys\nsys.path.append( '../util' )\nimport util\n\n\n#######################################\n\n\n# Main program\nif __name__ == '__main__':\n\n # Retrieve and validate arguments\n parser = argparse.ArgumentParser( description='Find occurrences of multiple spaces in all tables of master database' )\n parser.add_argument( '-m', dest='master_filename', help='Master database filename' )\n args = parser.parse_args()\n\n # Open the master database\n conn, cur, engine = util.open_database( args.master_filename, False )\n\n # Fetch names of all tables\n cur.execute( 'SELECT name FROM sqlite_master WHERE type=\"table\";' )\n rows = cur.fetchall()\n\n # Iterate through tables\n for row in rows:\n\n table_name = row[0]\n\n if table_name != 'sqlite_sequence':\n\n df = pd.read_sql_table( table_name, engine, index_col=util.ID, parse_dates=True )\n\n print( '\\n--- {0} ---'.format( table_name ) )\n\n for col_name in df.columns:\n try:\n sr = df[col_name].str.contains( ' ' )\n b_has = True in sr.value_counts()\n except:\n b_has = False\n\n if b_has:\n print( col_name )\n\n\n # Report elapsed time\n util.report_elapsed_time()\n","repo_name":"navkal/el","sub_path":"populators/find_spaces.py","file_name":"find_spaces.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36075910212","text":"import os\nimport re\n\nDEBUG = os.getenv('DEBUG', False)\n\n# no unknown commands ($commandName)\nBEGIN_TEMPLATE_CMD = 
'template'\nCOMPUTE_CMD = 'compute'\nEND_TEMPLATE_CMD = 'endtemplate'\nEXPAND_BY_CMD = 'by'\nEXPAND_CMD = 'expand'\nEXPAND_WITH_CMD = 'with'\nFOREACH_CMD = 'foreach'\nIF_CMD = 'if'\n\nKEYWORDS = [\n '', # ${VAR} has empty keyword\n BEGIN_TEMPLATE_CMD,\n COMPUTE_CMD,\n END_TEMPLATE_CMD,\n EXPAND_BY_CMD,\n EXPAND_CMD,\n EXPAND_WITH_CMD,\n FOREACH_CMD,\n IF_CMD,\n]\n\nARGUMENT_PREFIX = r'[\\{\\s]'\nKEYWORD_PREFIX = '\\$' # Escape special char for RegEx\nVAR_PREFIX = '\\$\\{'\nVAR_SUFFIX = '\\}'\nVAR_REGEX = r'' + VAR_PREFIX + r'[^\\$]*' + VAR_SUFFIX\n\ndef update_template_state(cmd, template_state, line_number):\n if 'template' in cmd:\n if template_state:\n raise RuntimeError(f'Nested template state not supported at line: {line_number}')\n template_state = True\n elif 'endtemplate' in cmd:\n if not template_state:\n raise RuntimeError(f'End of template but never begin at line: {line_number}')\n template_state = False\n return template_state\n\ndef extract_command(line, line_number):\n line = line.rstrip()\n cmd = []\n for i in re.finditer(KEYWORD_PREFIX, line):\n subline = line[i.end():]\n\n keyword_end = re.search(ARGUMENT_PREFIX, subline)\n if keyword_end is None:\n k = subline\n else:\n k = subline[:keyword_end.start()]\n\n if k not in KEYWORDS:\n raise RuntimeError(f'Unidentified keyword: {k} in line #{line_number}: {line}')\n\n if k != '':\n if DEBUG:\n print(f'Command found at {line}: {k}')\n\n cmd.append(k)\n\n if DEBUG and len(cmd) > 0:\n print(cmd)\n return cmd\n\ndef extract_variable(line, line_number):\n line = line.rstrip()\n vars = []\n for i in re.finditer(VAR_PREFIX, line):\n subline = line[i.end():]\n var_end = re.search(VAR_SUFFIX, subline)\n if var_end is None:\n raise RuntimeError(f'Unmatched variable definition in line #{line_number}: {line}')\n var = subline[:var_end.start()]\n\n if DEBUG:\n print(f'Variable found at {line}: {var}')\n\n vars.append(var)\n\n return vars\n\n\ndef process_foreach(line_in, var_lookup):\n line = line_in\n if '$foreach' not in line:\n return line\n # Expand foreach term\n # m = re.search(r'\\$foreach\\{([^\\{\\}]*)\\}\\{([^\\{\\}]*)\\}\\{(.*)\\}\\$with\\{([^\\{\\}]*)\\}', line)\n m = re.search(r'([^\\{\\}]*)\\$foreach\\{([^\\{\\}]*)\\}\\{([^\\{\\}]*)\\}\\{(.*)\\}\\$with\\{([^\\{\\}]*)\\}(.*)', line)\n pre_foreach = m.group(1)\n term = m.group(2)\n subject = m.group(3)\n body_og = m.group(4)\n concat = m.group(5)\n post_foreach = m.group(6)\n\n print('body_og: ', body_og)\n body = process_foreach(body_og, var_lookup)\n\n print('line: ', line)\n print('term: ',term)\n print('subject: ', subject)\n print('body: ', body)\n print('concat: ', concat)\n\n replace = ''\n for sub in var_lookup[subject]:\n body_sub = re.sub(r'\\$\\{' + term + r'\\}', sub, body)\n m_if = re.search(r'(.*)\\$if\\{(.*)\\}\\{(.*)\\}\\{(.*)\\}(.*)', body_sub)\n pre_if = m_if.group(1)\n cond = m_if.group(2)\n clause_t = m_if.group(3)\n clause_f = m_if.group(4)\n post_if = m_if.group(5)\n\n clauss = ''\n if eval(cond):\n clause = clause_t\n else:\n clause = clause_f\n replace = replace + pre_if + clause + post_if + concat\n replace = replace[:-len(concat)]\n\n pos = re.search(r'[^\\$\\{\\}]*(\\$foreach.*\\$with\\{[^\\$\\{\\}]\\}).*', line)\n # print('group: ', pos.group(1))\n print('before: ', line.rstrip())\n # line = re.sub(pos.group(1), replace, line)\n line = line.replace(pos.group(1), replace)\n print('replace: ', replace)\n print('pos: ', pos.group(1))\n print('after: ', line.rstrip())\n print()\n return 
line\n","repo_name":"synergylabs/TEO-release","sub_path":"proverif/compile_utils.py","file_name":"compile_utils.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"12995129265","text":"import respgen\n\n# before running, using os.environ, specify os.environ['ACCESS_ID'] = 'your id', os.environ['ACCESS_KEY'] = 'your secret'\n# imgurl='https://upload.wikimedia.org/wikipedia/commons/7/7e/1993ToyotaMR2Hardtop.jpg'\n# imgurl2='https://upload.wikimedia.org/wikipedia/commons/1/13/Red_2019_Ferrari_SF90_Stradale_%2848264238897%29_%28cropped%29.jpg'\n\ndef main():\n while True:\n imgurl = input('Enter the image URL: ')\n if '.jpg' in imgurl or '.png' in imgurl:\n break\n else:\n print('Invalid image url, please try again.')\n continue\n \n while True:\n testlabel = input('Enter keyword label: ')\n if testlabel == '':\n print('Empty label, please try again.')\n continue\n else:\n break\n print(testlabel, imgurl)\n print('Labels generating...')\n try:\n respgen.generate_data(imgurl, 'URL' if 'http' in imgurl else 'LOCAL' )\n except:\n print(\"\")\n return testlabel.lower()\n\n","repo_name":"jlow64/CarRekognitionAWS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74449418248","text":"import json\n\nimport pulsar\nfrom submodules.utils.logger import Logger\n\nlogger = Logger()\n\n\nclass Producer:\n\n def __init__(self, config):\n self.config = config\n client = pulsar.Client(self.config.serverUrl)\n self.producer = client.create_producer(\n self.config.topic,\n block_if_queue_full=True,\n batching_enabled=True,\n batching_max_publish_delay_ms=10\n )\n\n def send_callback(self, res, msg):\n logger.info(f\"pulsar producer callback: {res} {msg}\")\n\n async def push(self, message):\n message = json.dumps(message).encode()\n self.producer.send_async(message, self.send_callback)\n\n async def cleanup(self):\n pass\n","repo_name":"lisper-inmove/MessageQueue","sub_path":"pulsar_mq/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40403940073","text":"from pathlib import Path\nimport random\n\n\npath = Path('./word_list.txt')\n\n\ndef archive_lines():\n if path.is_file():\n with open(path, 'r') as f:\n lines = (f.readlines())\n return lines\n else:\n raise Exception(\"Word list archive doesn't exist 😞\")\n\n\ndef read_random_word():\n lines = archive_lines()\n n_words = len(lines)\n word_line = random.randint(0, n_words)\n return lines[word_line]\n\n\n\n","repo_name":"vcaitite/hangman","sub_path":"read_txt_archive.py","file_name":"read_txt_archive.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28936343458","text":"def array_search(A:list, N:int, x:int):\n \"\"\"\n Searches for the number x in array A\n from 0 to N-1 index inclusive.\n Returns the index of element x in array A.\n Or -1 if there is none.\n If there are several identical elements in the array,\n equal to x, then return the index of the first in a row.\n \"\"\"\n for k in range(N):\n if A[k] == x:\n return k\n return -1\n\n\ndef test_array_search():\n A1 = [1, 2, 3, 4, 5]\n m = array_search(A1, 5, 8)\n if m == -1:\n print(\"#test1 - ok\")\n else:\n 
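# 8 does not occur in A1, so anything other than -1 means the linear search failed\n 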
print(\"#test1 - fail\")\n\n\n A2 = [-1, -2, -3, -4, -5]\n m = array_search(A2, 5, -3)\n if m == 2:\n print(\"#test2 - ok\")\n else:\n print(\"#test2 - fail\")\n\n\n A3 = [10, 20, 30, 10, 10]\n m = array_search(A3, 5, 10)\n if m == 0:\n print(\"#test3 - ok\")\n else:\n print(\"#test3 - fail\")\n\n\ntest_array_search()\n","repo_name":"Hoasker/learning_python","sub_path":"lec_5/test_linear_search.py","file_name":"test_linear_search.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37666738638","text":"\"\"\"\nFile I/O: File Display\nReturns numbers from a file\n\"\"\"\nfile_name = 'numbers.txt'\nprint('This program displays numbers read from a file.')\nwith open('numbers.txt') as file:\n for i in file:\n print(\"Number:\", int(i))\nfile.close()","repo_name":"travesto/CS116","sub_path":"filedisplay.py","file_name":"filedisplay.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23735015945","text":"import cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\n# Create a black image, a window\nimg = np.zeros((300,512,3), np.uint8)\n\nimagen_bgr = cv2.imread('prubeaazul2.png')\ncv2.namedWindow ('Video')\ncv2.createTrackbar ('H min', 'Video', 107, 180, nothing)\ncv2.createTrackbar ('S min', 'Video', 121, 255, nothing)\ncv2.createTrackbar ('V min', 'Video', 75, 255, nothing)\ncv2.createTrackbar ('H max', 'Video', 110, 180, nothing)\ncv2.createTrackbar ('S max', 'Video', 255, 255, nothing)\ncv2.createTrackbar ('V max', 'Video', 255, 255, nothing)\n\nkernel = np.ones((3,3),np.uint8)\n\nimagen_bgr = imagen_bgr[100:440, 140:1050]\n\nwhile(1):\n #imagen_bgr = cv2.flip(cv2.transpose(imagen_bgr), 1)\n #imagen_bgr = imagen_bgr[180:500, 0:]\n hsv = cv2.cvtColor (imagen_bgr, cv2.COLOR_BGR2HSV)\n\n #mask = cv2.inRange (hsv, np.array ([Hmin, Smin, Vmin]), np.array ([Hmax, Smax, Vmax]))\n mask = cv2.inRange (hsv, \n np.array ([cv2.getTrackbarPos ('H min', 'Video'), cv2.getTrackbarPos ('S min', 'Video'), cv2.getTrackbarPos ('V min', 'Video')]),\n np.array ([cv2.getTrackbarPos ('H max', 'Video'), cv2.getTrackbarPos ('S max', 'Video'), cv2.getTrackbarPos ('V max', 'Video')]))\n #mascara_rojo1 = cv2.inRange(hsv, rojo_bajos1, rojo_altos1)\n #mask = cv2.bitwise_and(imagen_bgr,imagen_bgr, mask= mask)\n mask = cv2.dilate (mask, cv2.getStructuringElement (cv2.MORPH_RECT, (5, 5)), iterations = 1)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n\n\n Hmin = cv2.getTrackbarPos ('H min', 'Video')\n Smin = cv2.getTrackbarPos ('S min', 'Video')\n Vmin = cv2.getTrackbarPos ('V min', 'Video')\n Hmax = cv2.getTrackbarPos ('H max', 'Video')\n Smax = cv2.getTrackbarPos ('S max', 'Video')\n Vmax = cv2.getTrackbarPos ('V max', 'Video')\n cv2.imshow('imagen',imagen_bgr)\n #cv2.imshow ('hsv', hsv)\n\n cv2.imshow ('Video', mask)\n\n\n\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\ncv2.destroyAllWindows()","repo_name":"brayanjav28/brayanjav28.github.com","sub_path":"Escritorio/Manipulación de imagenes - Python/Pruebas/encontrandocolores.py","file_name":"encontrandocolores.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15139513685","text":"# bit全探索\r\nn = 4\r\nlst = [[2,3],[1,2],[3,4],[2,2]]\r\nw = 5\r\n\r\nans = 0\r\nfor i in range(2 ** n):\r\n weight = 
0\r\n value = 0\r\n for j in range(n):\r\n if 1 & (i >> j):\r\n weight += lst[j][0]\r\n value += lst[j][1]\r\n if weight <= w:\r\n ans = max(ans, value)\r\n\r\nprint(ans)","repo_name":"willcom1992/antbook","sub_path":"初級2-3_p52_01ナップザック問題.py","file_name":"初級2-3_p52_01ナップザック問題.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17710803984","text":"class Node:\n def __init__(self,key, data):\n self.key = key\n self.data = data #this time each node also carries a payload, so the data is stored on creation\n self.left = None\n self.right = None\n\n def inorder(self):\n traversal = []\n if self.left:\n traversal += self.left.inorder()\n traversal.append(self) #build and return a list of the nodes themselves\n if self.right:\n traversal += self.right.inorder()\n return traversal\n \n def min(self):\n if self.left:\n return self.left.min()\n else:\n return self\n \n def max(self):\n if self.right:\n return self.right.max()\n else:\n return self\n\n def lookup(self, key, parent=None):\n if key < self.key: #if the key is smaller than the current node, go left\n if self.left:\n return self.left.lookup(key,self) #the parent of self.left is self, so pass self along as parent\n else:\n return None,None\n \n elif key > self.key:\n if self.right:\n return self.right.lookup(key, self)\n else:\n return None, None\n else: #the node we are looking for has been found\n return self,parent #self -> the found node; parent is its parent, so return both as they are\n\n\n #if the key is smaller, go left and call insert recursively; once the left or right subtree is missing, the insertion point has been found, so create a new node and attach it there.\n def insert(self, key, data):\n if key < self.key: \n #the incoming key is smaller than the current node -> the case where we must go left\n if self.left:\n return self.left.insert(key, data)\n else:\n self.left = Node(key,data)\n elif key > self.key:\n if self.right:\n return self.right.insert(key,data)\n else:\n self.right = Node(key,data)\n else:\n raise KeyError('duplicate key error')\n\n def countChildren(self): #count the children of this node\n count = 0\n if self.left:\n count += 1\n if self.right:\n count += 1\n return count\n\nclass BinSearchTree:\n def __init__(self):\n self.root = None\n \n def inorder(self):\n if self.root:\n return self.root.inorder()\n else:\n return []\n\n def min(self):\n if self.root:\n return self.root.min()\n else:\n return None\n \n def max(self):\n if self.root:\n return self.root.max()\n else:\n return None\n\n def lookup(self,key): #input: the key to search for; returns: the found node and its parent node (why? -> removal needs to know it), or None for each if not found\n if self.root:\n return self.root.lookup(key)\n else:\n return None, None\n \n def insert(self, key, data): #takes key and data as input\n if self.root: \n self.root.insert(key, data) #if a root already exists, insert recursively through the nodes\n else:\n self.root = Node(key, data) #otherwise create a new node and make it the root\n \n def remove(self, key):\n node, parent = self.lookup(key) #here node is the one to delete and parent is its parent node\n if node:\n nChildren = node.countChildren()\n # The simplest case of no children\n if nChildren == 0:\n # If there is a parent,\n # determine whether node is its left or right child and\n # set parent.left or parent.right to None,\n # cutting the former leaf node off from the tree.\n if parent:\n if parent.left == node:\n parent.left = None\n else:\n parent.right = None\n # If there is no parent (node is the root),\n # set self.root to None, leaving an empty tree.\n else:\n self.root = None\n # When the node has only one child\n elif nChildren == 1:\n # Determine whether the single child is on the left or the right\n # and keep a reference to that child.\n child = None\n if node.left:\n child = node.left\n else:\n child = node.right\n # If there is a parent,\n # determine whether node is its left or right child and\n # put the child referenced above in node's place.\n if parent:\n if parent.left == node:\n parent.left = child\n else:\n parent.right = child\n # If there is no parent (node is the root),\n # put the referenced child into self.root instead.\n else:\n self.root = child\n # When the node has both left and right children\n else:\n # parent points at node,\n # and successor starts at node's right child,\n parent = node\n successor = node.right\n\n # so by repeatedly following left-child links from successor,\n # when the loop ends successor is the node holding the very next key\n # and parent is that node's parent.\n # To find the node with the next-larger key, go to the right child of the node being deleted\n # and keep following left children - that is exactly how a binary search tree is structured.\n while successor.left:\n parent = successor\n successor = successor.left\n \n # Copy successor's key and data into node, the node being deleted.\n node.key = successor.key\n node.data = successor.data\n # Now determine whether successor is parent's left or right child and,\n # accordingly, make parent.left or parent.right\n # point at the child successor had (it may have none).\n if parent.left == successor:\n parent.left = successor.right #successor can only have a right child by construction\n else:\n parent.right = successor.right\n\n return True\n\n else:\n return False\n\n\n","repo_name":"sanghunlee-711/Study-Algorithm","sub_path":"Lecture/Welcome/21강-이진탐색트리2/21-1.solution.py","file_name":"21-1.solution.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"15551398598","text":"import tensorflow as tf\n\n# define a list of 4 image paths\nimages = ['image1.jpg', 'image2.jpg', 'image3.jpg', 'image4.jpg']\n# define a list of 4 labels\nlabels = [1, 2, 3, 4]\n\n# tf.train.slice_input_producer is a tensor generator: following the given settings, it repeatedly takes one tensor from a tensor list, sequentially or at random, and puts it into the filename queue. slice_input_producer(\n# tensor_list, num_epochs=None, shuffle=True, seed=None, capacity=32, shared_name=None, name=None) 1st argument\n# tensor_list: a list of tensors whose first dimensions must all be equal, i.e. the counts must match - for every image there must be a corresponding label. 2nd argument num_epochs:\n# optional integer, the number of iterations; with num_epochs=None the generator can traverse the tensor list indefinitely, with num_epochs=N it can traverse the list only N times. 3rd argument shuffle:\n# bool, whether to shuffle the sample order. Normally, if shuffle=True the generated samples are already shuffled and need not be shuffled again during batching, so tf.train.batch is enough; if shuffle=False,\n# the batching step needs to shuffle the samples with tf.train.shuffle_batch.\n# 4th argument seed: optional integer, the seed for random-number generation; only effective when the third argument is shuffle=True.\n# 5th argument capacity: the capacity of the tensor list queue.\n# 6th argument shared_name: optional; if a 'shared_name' is set, the generated tensor can be shared under that name across different contexts (Sessions).\n# 7th argument name: optional, the name of the operation.\n[images, labels] = tf.train.slice_input_producer([images, labels],\n 
num_epochs=2,\n shuffle=True)\n\nwith tf.Session() as sess:\n # initialize the local variables (the num_epochs counter needs this)\n sess.run(tf.local_variables_initializer())\n\n # TensorFlow's Session object supports multithreading: several threads can be created inside one Session and run in parallel. All threads in the Session must be able to be stopped synchronously, exceptions must be caught and reported correctly, and when the session terminates\n # the queues must be closed properly. TensorFlow provides two classes to manage multithreading in a Session: tf.Coordinator and tf.QueueRunner, and the two are usually used together.\n # The Coordinator class manages multiple threads in a Session: it can stop several worker threads at once and report exceptions to the program that is waiting for all workers to terminate; once that thread catches the exception it stops all threads. Use\n # tf.train.Coordinator() to create a thread manager (coordinator) object.\n # The QueueRunner class starts the enqueue threads for tensors; it can launch several worker threads that push multiple tensors (training data) into the filename queue at the same time. The concrete entry point is tf.train.start_queue_runners - only after calling\n # tf.train.start_queue_runners are tensors really pushed into the in-memory queue for the compute units to consume; otherwise the in-memory queue is empty and the dataflow graph waits forever.\n tf.train.start_queue_runners(sess=sess) # start the queue-filling threads\n\n for i in range(8): # fetch data from the file queue, 4 items * 2 epochs\n print(sess.run([images, labels]))\n","repo_name":"cwyd0822/cifar10-tensorflow-read-write","sub_path":"reader_cifar10-1.py","file_name":"reader_cifar10-1.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"40262394962","text":"import boto.kinesis\nfrom boto.kinesis.exceptions import ResourceInUseException\nimport os\nimport time\n\n# NOTE: aws_profile, region and stream_name are expected to be supplied by the\n# surrounding configuration; they are not defined in this module as it stands.\nif aws_profile:\n os.environ['AWS_PROFILE'] = aws_profile\n\n# connect to the kinesis\nkinesis = boto.kinesis.connect_to_region(region)\n\ndef get_status():\n r = kinesis.describe_stream(stream_name)\n description = r.get('StreamDescription')\n status = description.get('StreamStatus')\n return status\n\ndef create_stream(stream_name):\n try:\n # create the stream\n kinesis.create_stream(stream_name, 1)\n print('stream {} created in region {}'.format(stream_name, region))\n except ResourceInUseException:\n print('stream {} already exists in region {}'.format(stream_name, region))\n\n\n # wait for the stream to become active\n while get_status() != 'ACTIVE':\n time.sleep(1)\n print('stream {} is active'.format(stream_name))","repo_name":"Ritakushwaha/Twitter_Data_Streaming","sub_path":"twitter_data_with_kinesis/kinesis_consumer.py","file_name":"kinesis_consumer.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71713498888","text":"import pandas as pd\nimport numpy as np\nimport math\nfrom scipy.special import erfinv, erf\nfrom scipy.signal import welch\n\ndef gamma (phi, W2,m,h,e,n,k):\n #modal participation coefficient for displacement\n ga_d = phi\n\n #modal participation coefficient for acceleration\n\n ga_a = np.multiply(phi,W2)\n\n #modal participation coefficient for storey shear\n\n ga_v = np.zeros((2*n,k))\n\n for i in range(n):\n for l in range(n):\n for j in range(k):\n ga_v[i,j] = ga_v[i,j] + m[l,l]*phi[l,j]*W2[j,j]\n ga_v[n+i,j] = ga_v[n+i,j] + m[n+l,n+l]*phi[n+l,j]*W2[j,j]\n \n #modal participation coefficient for bending moment\n\n ga_m = np.zeros((2*n,k))\n\n for i in range(n):\n for l in range(n):\n for j in range(k):\n if i==1:\n ga_m[i,j]= ga_m[i,j]+ h[l]*m[l,l]*phi[l,j]*W2[j,j]\n ga_m[n+i,j]=ga_m[n+i,j]+ h[l]*m[n+l,n+l]*phi[n+l,j]*W2[j,j]\n else:\n ga_m[i,j]= ga_m[i,j]+ (h[l]-h[i-1])*m[l,l]*phi[l,j]*W2[j,j]\n ga_m[n+i,j]=ga_m[n+i,j]+ (h[l]-h[i-1])*m[n+l,n+l]*phi[n+l,j]*W2[j,j]\n\n #modal participation coefficient for torsion\n\n ga_t = np.zeros((n,k))\n\n for i in range(n):\n for l in range(n):\n for j in range(k):\n ga_t[i,j] = ga_t[i,j] + (-e[l,2]*m[l,l]*phi[l,j]+e[l,1]*m[n+l,n+l]*phi[n+l,j]+m[2*n+l,2*n+l]*phi[2*n+l,j])*W2[j,j]\n \n\n return ga_d, ga_a, ga_v, ga_m, ga_t\n\ndef 
isnc(p):\n \n return math.sqrt(2)*erfinv(p)\n\ndef mat_diag(A,n):\n B = np.zeros((1,1))\n for i in range(n):\n \n B[i,i] = A[i]\n return B\ndef cdf_std_norm(x):\n return 0.5*(1+erf(x/math.sqrt(2)))\n\ndef q_explicit (M,P,C,K,k,N,dt):\n q = np.zeros((1,1))\n qd = np.zeros((1,1))\n qdd = np.zeros((1,1))\n for i in range(k):\n for j in range(N):\n if j ==1:\n q[i,j] = 0.0\n qd[i,j] = 0.0\n qdd[i,j] = (1/M([i,i]))*(P[i,j]-C[i,i]*qd[i,j]-K[i,i]*q[i,j])\n\ndef q_implicit (M,P,C,K,k,N,dt):\n\n ga = 0.5\n be = 0.166667\n\n dpp=0.0\n dq=0.0\n dqd=0.0\n dqdd=0.0\n\n q = np.zeros((k,N))\n qd = np.zeros((k,N))\n qdd = np.zeros((k,N))\n KP = np.zeros((k,k))\n a = np.zeros((k,k))\n b = np.zeros((k,k))\n\n for i in range(k):\n for j in range(k):\n KP[i,j] = K[i,j]+(ga/(be*dt))*(C[i,j]+(1/(be*(dt*dt)))*M[i,j])\n a[i,j] = (1/(be*dt))*M[i,j]+(ga/be)*C[i,j]\n b[i,j] = (1/(2*be))*M[i,j]+dt*((ga/(2*be))-1)*C[i,j]\n\n for i in range(k):\n for j in range(N):\n if j == 1:\n dpp = P[i,j]+a[i,i]*qd[i,j]+b[i,i]*qdd[i,j]\n\n dq = (1/KP[i,i])*dpp\n dqd = (ga/(be*dt))*dq-(ga/be)*qd[i,j]+dt*(1-(ga/(2*be)))*qdd[i,j]\n dqdd =(1/(be*dt*dt))*dq-(1/(be*dt))*qd[i,j]-(1/(2*be))*qdd[i,j]\n\n\n q[i,j]=q[i,j]+dq\n qd[i,j]=qd[i,j]+dqd\n qdd[i,j]=qdd[i,j]+dqdd\n else:\n q[i,j]=q[i,j-1]\n qd[i,j]=qd[i,j-1]\n qdd[i,j]=qdd[i,j-1]\n\n dpp=P[i,j]-P[i,j-1]+a[i,i]*qd[i,j]+b[i,i]*qdd[i,j]\n dq = (1/KP[i,i])*dpp\n dqd = (ga/(be*dt))*dq-(ga/be)*qd[i,j]+dt*(1-(ga/(2*be)))*qdd[i,j]\n dqdd =(1/(be*dt*dt))*dq-(1/(be*dt))*qd[i,j]-(1/(2*be))*qdd[i,j]\n \n q[i,j]=q[i,j-1]+dq\n qd[i,j]=qd[i,j-1]+dqd\n qdd[i,j]=qdd[i,j-1]+dqdd\n\ndef peak (data,dur_ratio):\n\n if not data:\n raise Exception('Time series input expected')\n \n num_CDF = 1000\n min_CDF = 0.0005\n max_CDF = 0.9995\n CDF_pk = np.linspace(min_CDF,max_CDF, num_CDF)\n \n sdata = data.shape\n max_est = np.zeros((sdata[0],1))\n min_est = np.zeros((sdata[0],1))\n\n for i in range(sdata[0]):\n\n X = data[i,:]\n n = len(X)\n avg_X = np.mean(X)\n sorted_X = sorted(X)\n\n CDF_X = (np.arange(1,n+1))/(n+1)\n snv = isnc(CDF_X)\n avg_snv = np.mean(snv)\n \n sigma = (np.sum(np.multiply(snv,sorted_X)) - n*avg_snv*avg_X)/(np.sum(np.power(snv,2))-n*avg_snv**2)\n mu = avg_X - sigma*avg_snv\n X_fit = mu + sigma*snv\n\n\n norm_PPCC = sigma *np.std(snv)/np.std(sorted_X)\n\n '''\n --------------------------------------------------------------------------\n Estimate the mean zero upcorssing rate of a process y(t) with standard\n normal probability distribution using the classical Rice(1954) results as\n follow:\n --------------------------------------------------------------------------\n Estimate the interval of integration in frequency domain. 
This is done\n by matching variances obtained from the timehistory data and integration\n of the spectra in the frequency domain\n Variance from time history\n '''\n stdX = np.std(X)\n varX = np.power(stdX,2)\n\n df = 65536\n fs = 5.12\n\n f, S_X = welch(X,fs,nfft=df, detrend=False)\n sf = (df*0.5)\n si = 0\n var_X = (fs/df)*np.sum(S_X[si:sf])\n temp = var_X\n si = 1\n var_X = (fs/df)*np.sum(S_X[si:sf])\n\n while (abs(var_X-varX)varX):\n temp = var_X\n si = si+1\n var_X = (fs/df)*np.sum(S_X[si:sf])\n\n var_X = temp\n\n si = si -1 \n \n numer = np.trapz(f[si:sf],np.multiply(np.power(f[si:sf],2),S_X[si:sf]))\n denom = np.trapz(f[si:sf],np.multiply(f[si:sf],S_X[si:sf]))\n\n nu_y = np.sqrt(numer/denom)\n\n y_pk = np.sqrt(2*np.log(np.divide(-dur_ratio*nu_y*3600,np.log(CDF_pk))))\n\n X_max = y_pk *sigma +mu\n X_min = -y_pk *sigma +mu\n\n pdf_pk = np.multiply(np.multiply(-y_pk,CDF_pk),np.log(CDF_pk))\n max_est[i] = np.trapz(y_pk,np.multiply(pdf_pk,X_max))\n min_est[i] = np.trapz(y_pk,np.multiply(pdf_pk,X_min))\n \n return max_est , min_est\ndef td_response(gama,q,n,k,N):\n\n mr = np.zeros((n,k,N))\n\n for i in range(n):\n for j in range(k):\n for l in range(N):\n mr[i,j,l] = gama[i,j]*q[j,l]\n \n tr = np.zeros((n,N))\n\n for i in range(n):\n for j in range(k):\n for l in range(N):\n tr[i,l] = tr[i,l] + gama[i,j]*q[j,l]\n \n return mr, tr\n\n","repo_name":"chowlet5/Time_Domain_Analysis","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20854975607","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\ndef clickButton():\r\n messagebox.showinfo('요기제목','요기내용')\r\n\r\n\r\nwindow = Tk()\r\n\r\nlabel1 = Label(window, text=\"Learning Python\")\r\nlabel2 = Label(window, text=\"파이썬\", font=(\"궁서체\",30), fg=\"blue\")\r\nlabel3 = Label(window, text=\"Python\", font=(\"맑은고딕\",30), bg=\"red\", height=5, anchor=SE)\r\n\r\n\r\nphoto = PhotoImage(file='C:\\Images\\Pet_GIF\\Pet_GIF(256x256)\\cat01_256.gif')\r\nlabel4 = Label(window, image=photo)\r\nbutton1 = Button(window, text='나를 눌러줘', command=clickButton)\r\nbutton2 = Button(window, image=photo, command=clickButton)\r\n\r\n\r\nlabel1.pack(side=LEFT); label2.pack();label3.pack();label4.pack();button1.pack();\\\r\nbutton2.pack()\r\nwindow.mainloop()","repo_name":"leegj93/PythonProject","sub_path":"Code05-02 GUI 02.py","file_name":"Code05-02 GUI 02.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23476539223","text":"#!/usr/bin/env python\n'''\nExample script to plot a given data distribution compared with a standard\nGaussian distribution.\n'''\n\nimport matplotlib.pyplot as plt\nimport mne\nfrom mne.datasets import sample\nfrom jumeg.jumeg_plot import plot_histo_fit_gaussian\n\ndata_path = sample.data_path()\nsubjects_dir = data_path + '/subjects'\n\nfname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nfname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\n\nraw = mne.io.read_raw_fif(fname_raw)\nevents = mne.read_events(fname_event)\n\n# add a bad channel\nraw.info['bads'] += ['MEG 2443']\n\n# pick MEG channels\npicks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=False,\n exclude='bads')\n\n# Define epochs for left-auditory condition\nevent_id, tmin, tmax = 1, -0.2, 0.5\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, 
picks=picks,\n baseline=(None, 0), reject=dict(mag=4e-12))\n\ndata = epochs.get_data()\n\nfig = plot_histo_fit_gaussian(data, nbins=100, fnout=None, show=True)\n","repo_name":"jdammers/jumeg","sub_path":"examples/decompose/plot_data_distribution.py","file_name":"plot_data_distribution.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"30284706145","text":"import numpy as np\nfrom nearest_neighbors import KNNClassifier\nimport sklearn.neighbors\nimport sklearn.metrics\n\n\ndef kfold(n, n_folds):\n idx_array = []\n indices = np.arange(n)\n fold_sizes = np.full(n_folds, n // n_folds, dtype=int)\n fold_sizes[:n % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n idx_train = np.hstack((indices[0:start], indices[stop:]))\n idx_test = indices[start:stop]\n idx_array.append((idx_train, idx_test))\n current = stop\n return idx_array\n# source: sklearn's github\n\n\ndef knn_cross_val_score(X, y, k_list, score, cv, **kwargs):\n X = np.array(X)\n y = np.array(y)\n clf = KNNClassifier(k=k_list[0], **kwargs)\n if cv is None:\n cv = kfold(X.shape[0], 3)\n cv_scores = {k: np.zeros(len(cv)) for k in k_list}\n for i, (train, test) in enumerate(cv):\n clf.k = max(k_list)\n clf.fit(X[train], y[train])\n neighbours_dist, neighbours_class = clf.find_kneighbors(X[test], return_distance=True)\n neighbours_class = np.apply_along_axis(lambda i: clf.y[i], axis=0, arr=neighbours_class)\n for k in k_list:\n clf.k = k\n ans = pretrained_predict(clf, X[test], neighbours_dist[:, :k], neighbours_class[:, :k])\n cv_scores[k][i] = accuracy(ans, y[test])\n return cv_scores\n\n\ndef pretrained_predict(clf, X, neighbours_dist, neighbours_class):\n prediction = np.zeros(X.shape[0])\n if not clf.weights:\n for i in range(X.shape[0]):\n counts = np.bincount(neighbours_class[i, :])\n prediction[i] = np.argmax(counts)\n else:\n for i in range(X.shape[0]):\n counts = np.zeros(len(np.unique(clf.y)))\n for j in range(clf.k):\n counts[neighbours_class[i][j]] += 1 / (neighbours_dist[i][j] + 1e-5)\n prediction[i] = np.argmax(counts)\n return prediction\n\n\ndef accuracy(a, b):\n return np.sum(a == b) / a.shape[0]\n","repo_name":"vorobyov01/Data-Science","sub_path":"prac/prac1/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70129707530","text":"# 3499. 
퍼펙트 셔플\n\nfor t in range(1, int(input())+1):\n n = int(input())\n cards = input().split()\n half = (n//2) + 1 if n % 2 else n // 2\n one = cards[:half]\n two = cards[half:]\n print(f'#{t}', end=' ')\n for i in range(n//2):\n print(f'{one[i]} {two[i]}', end=' ')\n if n % 2:\n print(f'{one[-1]}')\n else:\n print()","repo_name":"kylekim2123/Algorithm-with-Python","sub_path":"SWEA/D3/[3499]퍼펙트셔플.py","file_name":"[3499]퍼펙트셔플.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17700194808","text":"#!/usr/bin/python3\n\n\"\"\"\nscript taht prints the id of the state that have the name passd\nas an arqument, from the database\n\"\"\"\n\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.engine.url import URL\n\nif __name__ == \"__main__\":\n\n url = URL.create(\n drivername='mysql',\n username=sys.argv[1],\n password=sys.argv[2],\n host='localhost',\n port=3306,\n database=sys.argv[3]\n )\n\n engine = create_engine(url)\n Session = sessionmaker(bind=engine)\n\n session = Session()\n\n state = session.query(State).filter(State.name == sys.argv[4]).first()\n\n if (state):\n print(state.id)\n else:\n print(\"Not found\")\n","repo_name":"mojtabababiker/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27133680489","text":"import numpy as np\nimport cv2\n\ndepth_image = cv2.imread('./videos/room.png', cv2.IMREAD_GRAYSCALE)\n\n# Normalize depth values to [0, 1]\nnormalized_depth = cv2.normalize(depth_image.astype(np.float32), None, 0, 1, cv2.NORM_MINMAX)\n\n# Create an empty RGB image with the same dimensions as the depth image\nrgb_image = np.zeros((*normalized_depth.shape, 3), dtype=np.float32)\n\nhue_margin = 0.01\nhue_padding = 0.01\n\nHCYwts = np.array([0.299, 0.587, 0.114])\n\ndef Hue2RGB(hue):\n h = hue * 6.0\n r = np.abs(h - 3) - 1\n g = 2 - np.abs(h - 2)\n b = 2 - np.abs(h - 4)\n return np.array([np.clip(r, 0, 1), np.clip(g, 0, 1), np.clip(b, 0, 1)])\n\n\ndef RGB2Hue(RGB):\n Cmax = np.max(RGB)\n Cmin = np.min(RGB)\n delta = Cmax - Cmin\n\n if delta == 0:\n hue = 0\n elif Cmax == RGB[0]:\n hue = (((RGB[1] - RGB[2]) / delta) % 6) / 6.0\n elif Cmax == RGB[1]:\n hue = (((RGB[2] - RGB[0]) / delta) + 2) / 6.0\n else:\n hue = (((RGB[0] - RGB[1]) / delta) + 4) / 6.0\n\n return np.array([hue, delta / Cmax, Cmax])\n\ndef HCYtoRGB(HCY):\n RGB = Hue2RGB(HCY[0])\n Z = np.dot(RGB, HCYwts)\n if HCY[2] < Z:\n HCY[1] *= HCY[2] / Z\n else:\n HCY[1] *= (1 - HCY[2]) / (1 - Z)\n return (RGB - Z) * HCY[1] + HCY[2]\n\n\ndef RGBtoHCY(RGB):\n HCV = RGB2Hue(RGB)\n Y = np.dot(RGB, HCYwts)\n Z = np.dot(Hue2RGB(HCV[0]), HCYwts)\n if Y < Z:\n HCV[1] *= Z / (Y + np.finfo(float).eps)\n else:\n HCV[1] *= (1 - Z) / (1 - Y + np.finfo(float).eps)\n return np.array([HCV[0], HCV[1], Y])\n\nfor i in range(normalized_depth.shape[0]):\n for j in range(normalized_depth.shape[1]):\n hue = np.round(normalized_depth[i, j] * (1 - hue_padding * 2) + hue_padding, 6)\n hue = np.clip(hue, 0, 1) * (1 - hue_margin * 2) + hue_margin\n lightness = 0.1 + 0.8 * normalized_depth[i, j] # Scale the Lightness channel within the range [0.1, 0.9]\n rgb_image[i, j] = HCYtoRGB(np.array([hue, 1, lightness]))\n\nrgb_image = 
(rgb_image*255).astype(np.uint8)\ncv2.imwrite('./videos/hcyrgb1.png', rgb_image)\n\nprocessed_depth = np.zeros_like(normalized_depth, dtype=np.float32)\n\nfor i in range(rgb_image.shape[0]):\n for j in range(rgb_image.shape[1]):\n hcl_color = RGBtoHCY((rgb_image[i, j].astype(np.float32) / 255)) # Modified line\n hue = hcl_color[0] # Modified line\n hue = (hue - hue_margin) / (1 - hue_margin * 2)\n hue = (hue - hue_padding) / (1 - hue_padding * 2)\n processed_depth[i, j] = np.interp(hue, (0, 1), (depth_image.min(), depth_image.max()), period=1)\n\n\ndepth_difference = np.abs(depth_image.astype(np.float64) - processed_depth)\nprint(\"Standard deviation:\", np.std(depth_difference))\ndepth_sum = np.sum(depth_difference)\nprint(\"Sum:\",np.mean(depth_difference))\noutput_image = np.hstack([depth_image, processed_depth.astype(np.uint8), depth_difference.astype(np.uint8)])\n\ncv2.imwrite('./videos/hcy1.png', output_image)","repo_name":"Hu5ha/RGBD-video-recorder","sub_path":"Humanextractandvideocombining/hcy.py","file_name":"hcy.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12984872201","text":"import os\n\n\nclass Config:\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:123456@127.0.0.1:3306/flaskblog'\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n SQLALCHEMY_ECHO = True\n SECRET_KEY = 'qweridkfogji'\n # 项目路径\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n # 静态文件夹的路径\n STATIC_DIR = os.path.join(BASE_DIR, 'static')\n TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')\n # 头像的上传目录\n UPLOAD_ICON_DIR = os.path.join(STATIC_DIR, 'upload/icon')\n # 相册的上传目录\n UPLOAD_PHOTO_DIR = os.path.join(STATIC_DIR, 'upload/photo')\n\n\nclass DevelopmentConfig(Config):\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n ENV = 'production'\n DEBUG = False\n","repo_name":"zzw-111-bit/FlaskBlog","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42362795194","text":"from astropy.io import fits\nimport numpy as np\nimport os\nimport logging\nimport matplotlib.pyplot as plt\nfrom astropy.visualization import interval\n\nLOG = logging.getLogger(__name__)\n\nfrom . import constants\nfrom . import mask as mk\n\n\ndef write_fits(image, filename, header=None, overwrite=True):\n \"\"\"\n Create and write FITS file containing images for each dither point\n with substracted reference image\n\n\n :param image: 2D image to store in the FITS file. If a MaskedArray is provided, masked values are filled with 0\n :type image: nd.array(y,x)\n :param dict header: header for the FITS file\n :type header: astropy.io.fits.Header or dict\n :param str filename: filename for the output FITS to be created\n :param bool overwrite: overwrite substracted FITS file if they exists\n \"\"\"\n # Handle the special case of a maskedArray because writing to fits wouldn't work with it. 
Instead, we fill values\n # with 0 and replace with a standard array.\n if isinstance(image, np.ma.MaskedArray):\n image = image.filled(0)\n\n # Correct HIERARCH warnings by rewriting the header\n if isinstance(header, fits.Header):\n cards = header.cards\n else:\n cards = []\n for k, v in header.items():\n # Tuple mean there's a comment\n if isinstance(v, tuple):\n cards.append((k, *v))\n else:\n cards.append((k, v, \"\"))\n\n corrected_header = fits.Header()\n\n if header is not None:\n for (key, value, comment) in cards:\n if len(key) > 8:\n key = f\"hierarch {key}\"\n\n corrected_header[key] = (value, comment)\n\n hdu = fits.PrimaryHDU(image, header=corrected_header)\n hdulist = fits.HDUList([hdu])\n\n path = os.path.dirname(filename)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n LOG.info(f\"Write file '{filename}'\")\n hdulist.writeto(filename, overwrite=overwrite)\n\n\ndef write_jwst_fits(image, filename, header=None, overwrite=True):\n \"\"\"\n Create and write FITS file containing an image and metadatas\n Try to recreate a JWST MIRI datamodel, not in detail but the general architecture\n metadata will be in the primary header and image in the SCI header.\n\n\n :param image: 2D image to store in the FITS file. If a MaskedArray is provided, masked values are filled with 0\n :type image: nd.array(y,x)\n :param dict header: header for the FITS file\n :type header: astropy.io.fits.Header or dict\n :param str filename: filename for the output FITS to be created\n :param bool overwrite: overwrite substracted FITS file if they exists\n \"\"\"\n # Handle the special case of a maskedArray because writing to fits wouldn't work with it. Instead, we fill values\n # with 0 and replace with a standard array.\n if isinstance(image, np.ma.MaskedArray):\n image = image.filled(0)\n \n # Correct HIERARCH warnings by rewriting the header\n if isinstance(header, fits.Header):\n cards = header.cards\n else:\n cards = []\n for k, v in header.items():\n # Tuple mean there's a comment\n if isinstance(v, tuple):\n cards.append((k, *v))\n else:\n cards.append((k, v, \"\"))\n\n corrected_header = fits.Header()\n\n if header is not None:\n for (key, value, comment) in cards:\n if len(key) > 8:\n key = f\"hierarch {key}\"\n\n corrected_header[key] = (value, comment)\n\n phdu = fits.PrimaryHDU(header=corrected_header)\n\n sci_hdu = fits.ImageHDU(image, name=\"SCI\")\n err_hdu = fits.ImageHDU(name=\"ERR\")\n\n fake_mask = np.zeros_like(image, dtype=\"uint\")\n dq_hdu = fits.ImageHDU(fake_mask, name=\"DQ\")\n\n hdulist = fits.HDUList([phdu, sci_hdu, err_hdu, dq_hdu])\n\n path = os.path.dirname(filename)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n LOG.info(f\"Write file '{filename}'\")\n hdulist.writeto(filename, overwrite=overwrite)\n\n\ndef fits_thumbnail(filename, fits_extension=1, ext=\"jpg\", out=None):\n \"\"\"\n Create a thumbnail as a .png file of one extension of a fits file with Zscale\n\n WARNING: That extension must be 2d obviously\n\n :param str filename: absolute or relative path to fits file\n :param int fits_extension: [optional] extension 1 by default\n :param str ext: [optional] output bitmap extension (by default: png). If out is defined, ext is ignored.\n :param str out: [optional] output filename. 
Allow to write the thumbnail in a different folder and with a\n different name than the default\n \"\"\"\n with fits.open(filename) as hdulist:\n image = hdulist[fits_extension].data\n\n if ext != \"jpg\" and out is not None:\n LOG.warning(f\"fits_thumbnail: ext parameter is ignored when out is defined.\")\n\n if out is None:\n basename, dummy = os.path.splitext(filename)\n image_filename = f\"{basename}.{ext}\"\n else:\n image_filename = out\n\n write_thumbnail(image, image_filename)\n\n\ndef write_thumbnail(image, filename):\n \"\"\"\n Write an image to bitmap with a ZScale, and the same pixel resolution\n\n :param image:\n :param str filename: output filename (e.g. \"myimage.png\")\n \"\"\"\n\n zscale = interval.ZScaleInterval(nsamples=600, contrast=0.25, max_reject=0.5, min_npixels=5, krej=2.5,\n max_iterations=5)\n (vmin, vmax) = zscale.get_limits(image)\n\n basedir = os.path.dirname(filename)\n if not os.path.isdir(basedir):\n os.makedirs(basedir)\n\n plt.imsave(filename, image, cmap=\"viridis\", vmin=vmin, vmax=vmax, origin=\"lower\")\n\n LOG.info(f\"Write file '{filename}'\")\n\n\ndef MIRI_flags_image(filename):\n \"\"\"\n Display all flags for a single image into individual images to find what features correspond to what flag\n\n Will write 32 images next to the original file with postfix *_flag_i.png\n\n :param filename: FITS filename\n \"\"\"\n nb_flags = 32\n\n basename, dummy = os.path.splitext(filename)\n\n hdulist = fits.open(filename)\n # Create a masked array\n mask = hdulist['DQ'].data\n\n individual_masks = mk.get_separated_dq_array(mask)\n\n for i in range(nb_flags):\n fig, ax = plt.subplots(figsize=(10, 7.5))\n\n out_filename = f\"{basename}_flag_{i}.png\"\n flag_detail = constants.status_pipeline[2**i]\n title = f\"flag 2^{i}: {2**i}\"\n\n flag_image = individual_masks[:, :, i]\n\n ax.imshow(flag_image, cmap='Greys_r', origin=\"lower\")\n ax.set_title(title)\n ax.set_ylabel(\"Y pixels\")\n ax.set_xlabel(\"X pixels\")\n fig.suptitle(f\"Flags {flag_detail}: (Black=Not flagged ; White=flagged)\")\n fig.savefig(out_filename)\n plt.close(fig)\n","repo_name":"ccossou/miritools","sub_path":"miritools/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":6724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8172515907","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 21 16:52:32 2021\n\n@author: guang\n\"\"\"\n\nimport pandas as pd\nimport os\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pathlib import Path\nimport time\nimport copy\n\ndef ExtendOutput(pathbase,csvList):\n for outFile in csvList:\n filename = outFile.split('_')\n idx1 = int(filename[0].split('output')[-1])\n idx2 = int(filename[1])\n if idx1 != idx2:\n dataFrame = pd.read_csv(pathbase/outFile)\n dataFrame['RV'] = dataFrame['RV'].mul(-1)\n dataFrame['RVmean'] = dataFrame['RVmean'].mul(-1)\n name = outFile.split('.')[0].split('_')\n index1 = name[0].split('output')[1]\n index2 = name[1]\n relativeRV = str(-int(name[2]))\n newFileName = 'output' + index2 + '_' + index1 + '_' + relativeRV + '.csv'\n dataFrame.to_csv(pathbase/newFileName,index = False)\n \ndef ReadAll(N,pathbase,csvList):\n resultAll = []\n for i in range(N):\n outputTemp = []\n for j in range(N):\n index1 = str(int(i/10)) + str(i%10)\n index2 = str(int(j/10)) + str(j%10)\n filename = [item for item in csvList if item.startswith('output'+index1+'_'+index2)]\n assert len(filename) <= 1, 'May there are 
duplicates files with same index'\n assert len(filename) >= 1, 'No output named start with this index:{},{}'.format(index1,index2)\n dataFrame = pd.read_csv(pathbase/filename[0])\n outputTemp.append(dataFrame)\n resultAll.append(outputTemp)\n return resultAll\n\ndef RVSuborder(resultList,suborderLen):\n RVList = []\n for idx1 in range(len(resultList)):\n start = time.perf_counter()\n dfsubList = resultList[idx1]\n RVI = np.empty(suborderLen,dtype=float) # for all suborders\n VarI = np.empty(suborderLen,dtype=float)\n for suborderIdx in range(suborderLen):\n RVtmp = np.empty(N,dtype=float)\n vartmp = np.empty(N,dtype=float)\n for idx2 in range(len(dfsubList)):\n subdf = dfsubList[idx2].iloc[[suborderIdx]] \n RVtmp[idx2] = subdf['RV'].item()\n vartmp[idx2] = subdf['var'].item()\n weighti = (1/vartmp)/np.sum(1/vartmp)\n RVi = np.sum(RVtmp*weighti)\n # under assumption of independent estimator\n vari = 1/np.sum(1/vartmp)\n RVI[suborderIdx] = RVi\n VarI[suborderIdx] = vari\n dfRVI = pd.DataFrame({\"RVI\":RVI,\"VarI\":VarI})\n RVList.append(dfRVI)\n print(time.perf_counter()-start)\n return RVList\n\ndef RVAggAll(RVList):\n RVout = np.empty(len(RVList),dtype=float)\n Varout = np.empty(len(RVList),dtype=float)\n for i in range(len(RVList)):\n df = RVList[i]\n weight = (1/df['VarI'])/(1/df['VarI']).sum()\n RV = (df[\"RVI\"]*weight).sum()\n var = 1/(1/df['VarI']).sum()\n RVout[i] = RV\n Varout[i] = var\n return RVout,Varout\n\n## First compute all suborders, then pairwise\ndef RVAggAll2(resultList):\n RV = np.empty(len(resultList),dtype=float)\n Var = np.empty(len(resultList),dtype=float)\n for i in range(len(resultList)):\n subList = resultList[i]\n RVtmp = np.empty(len(subList),dtype=float)\n Vartmp = np.empty(len(subList),dtype=float)\n for j in range(len(subList)):\n dataFrame = subList[j]\n weight = 1/dataFrame['var']\n weightSum = sum(weight)\n weightedRV = sum(dataFrame['RV']*weight/weightSum)\n weightedVar = 1/weightSum\n RVtmp[j] = weightedRV\n Vartmp[j] = weightedVar\n weight = 1/Vartmp\n weightSum = sum(weight)\n weightedRV = sum(weight*RVtmp/weightSum)\n weightedVar = 1/weightSum\n RV[i] = weightedRV\n Var[i] = weightedVar\n return RV,Var\n\ndef SaveDFbySuborder(pathbase,resultList,suborderLen):\n pathTarget = pathbase / 'bysuborder'\n if not os.path.exists(pathTarget):\n os.makedirs(pathTarget)\n fileLength = len(os.listdir(pathTarget))\n if fileLength == suborderLen:\n print('Generated suborder files exist!')\n else:\n length = len(resultList)\n for i in range(suborderLen):\n outIdx1 = np.empty(length*length,dtype=int)\n outIdx2 = np.empty(length*length,dtype=int)\n RV = np.empty(length*length,dtype=float)\n Var = np.empty(length*length,dtype=float)\n for idx1 in range(length):\n for idx2 in range(length):\n df = resultList[idx1][idx2]\n index = idx1*length + idx2\n outIdx1[index] = idx1\n outIdx2[index] = idx2\n RV[index] = df['RV'].iloc[i]\n Var[index] = df['var'].iloc[i]\n dfOut = pd.DataFrame({'idx1':outIdx1,'idx2':outIdx2,'RV':RV,'var':Var})\n outFileName = 'suborder_'+str(i)+'.csv'\n dfOut.to_csv(pathTarget/outFileName,index=False)\n return pathTarget\n\ndef RVSuborder2(pathTarget,N):\n csvList = os.listdir(pathTarget)\n csvList = [filename for filename in csvList if filename.endswith('.csv') and filename.startswith('suborder')]\n csvList.sort()\n \n RVList = []\n for file in csvList:\n df = pd.read_csv(pathTarget/file)\n RVoneOrder = np.empty(N,dtype=float)\n VaroneOrder = np.empty(N,dtype=float)\n for idx1 in range(N):\n dfsub = df[df['idx1']==idx1]\n dfsub = 
dfsub[dfsub['idx2']=N*N:\n print('Extension of files have been already done!!')\n else:\n ExtendOutput(pathbase,csvList) \n if not os.path.exists(pathbase / 'graph'): \n os.makedirs(pathbase / 'graph')\n csvList = os.listdir(pathbase)\n csvList = [ filename for filename in csvList if filename.endswith('.csv')]\n csvList.sort()\n \n lenSuborder = len(pd.read_csv(pathbase/csvList[0]))\n \n \n if ('bysuborder' in os.listdir(pathbase)):\n if len(os.listdir(pathbase/'bysuborder')) == lenSuborder:\n print('Suborder CSV files exist, no need to save')\n pathDFByOrder = pathbase / 'bysuborder'\n else:\n # Read all files into one 2d array \n print('Reading all the raw output CSV files......')\n start = time.perf_counter()\n resultAll = ReadAll(N,pathbase,csvList)\n print('time:{}s'.format(time.perf_counter()-start))\n # Saving New CSV files by suborders\n start = time.perf_counter()\n print('Saving New CSV files by suborders......')\n pathDFByOrder = SaveDFbySuborder(pathbase,resultAll,lenSuborder)\n print('time:{}s'.format(time.perf_counter()-start))\n \n # Deal with suborders rv\n start = time.perf_counter()\n print('Computing RV estimation for all suborders......')\n RVListbySuborder = RVSuborder2(pathDFByOrder,N)\n # Final output\n RVout,Varout = RVAggAll3(RVListbySuborder,N)\n print('time:{}s'.format(time.perf_counter()-start))\n \n # compute correlation between using leave-one-out method\n if ContaminationDet:\n start = time.perf_counter()\n print('Computing correlation by leave-one-out method......')\n corr = map(lambda i:LeaveOneOutCorr(RVListbySuborder,N,i),range(lenSuborder))\n corr = list(corr)\n print('time:{}s'.format(time.perf_counter()-start))\n RemaingCorrIdx = np.array(corr).argsort()[bottomK:]\n RVListRmSmCorr = [RVListbySuborder[i] for i in RemaingCorrIdx]\n RVRmLowCorr,VarRmLowCorr = RVAggAll3(RVListRmSmCorr,N)\n \n \n # # Deal with suborders rv\n # RVList = RVSuborder(resultAll,lengthOut)\n # # Final output\n # RVout,Varout = RVAggAll(RVList)\n \n # # RVout,Varout = RVAggAll2(resultAll)\n RVTrue = pd.read_csv(pathbase/'RVTrue.txt')['RVTrue'].to_numpy()\n \n ### compute errors using subset of N RV estimation\n ### e.g, with 100 spectrum, only use 5,10,20.... 
spectra to compute average errors.\n if ErrorByN == True:\n Nsubs = np.linspace(2,100,40,dtype=int)\n MAEs = []\n meanSigmas=[]\n for Nsub in Nsubs:\n start = time.perf_counter()\n print('Computing RV estimation for all suborders......')\n RVListbySuborder_Sub = RVSuborder2(pathDFByOrder,Nsub)\n # Final output\n RVoutSub,VaroutSub = RVAggAll3(RVListbySuborder_Sub,Nsub)\n print('time:{}s'.format(time.perf_counter()-start))\n\n RVTrueSub = RVTrue[0:Nsub]\n meansubTrueRVSub = RVTrueSub#-np.mean(RVTrueSub)\n MAE = np.around(abs(RVoutSub-meansubTrueRVSub).mean(),decimals=2)\n MAEs.append(MAE)\n meanSigmas.append(np.sqrt(VaroutSub).mean())\n pd.DataFrame({'sigma':meanSigmas,'MAE':MAEs}).to_csv(pathbase / 'graph' /'MAE_meansub.csv',index=False)\n fig4, ax4 = plt.subplots(figsize=(11,7))\n ax4.plot(Nsubs,MAEs,'o',label = \"MAE\")\n ax4.set_xscale('log')\n ax4.set_yscale('log')\n ax4.set_xlabel(\"Number of Spectra Involved\")\n ax4.set_ylabel(\"MAE\")\n ax4.grid(True, color='gainsboro', linestyle='-', linewidth=0.5)\n ax4.set_title('MAE versus N')\n fig4.legend()\n \n # plot the results\n fig, ax = plt.subplots(figsize=(18,9))\n X = np.arange(N,dtype=int)\n meansubTrueRV = RVTrue#-np.mean(RVTrue)\n \n ax.plot(X,meansubTrueRV,'o',label = \"True Radial Velocity (mean-substracted)\")\n turerms1 = np.around(np.sqrt(((RVout-meansubTrueRV)**2).mean()),decimals=2)\n ax.plot(X,RVout,'o-',label = \"Total Suborders, RMSE:\"+str(turerms1))\n ax.fill_between(X, (RVout + 1.96*np.sqrt(Varout)), (RVout - 1.96*np.sqrt(Varout)), alpha=0.3) \n ax.set_xlabel(\"Index of epochs\")\n ax.set_ylabel(\"Radial Velocity/ m/s (mean-substracted)\")\n ax.grid(True, color='gainsboro', linestyle='-', linewidth=0.5)\n fig.legend()\n \n fig1, ax1 = plt.subplots(figsize=(11,7))\n error = 1.96*np.sqrt(Varout)\n ax1.plot(X,meansubTrueRV,'-',label = \"True Radial Velocity (mean-substracted)\")\n turerms1 = np.around(np.sqrt(((RVout-meansubTrueRV)**2).mean()),decimals=2)\n ax1.errorbar(X, RVout, yerr=error,color='black',capsize=3, fmt='o',label = \"Estimated Radial Velocity, RMSE:\"+str(turerms1))\n ax1.set_xlabel(\"Index of epochs\")\n ax1.set_ylabel(\"Radial Velocity/ m/s (mean-substracted)\")\n ax1.grid(True, color='gainsboro', linestyle='-', linewidth=0.5)\n ax1.set_title('RV estimation using all suborders')\n fig1.legend()\n \n if ContaminationDet:\n fig2, ax2 = plt.subplots(figsize=(11,7))\n error = 1.96*np.sqrt(VarRmLowCorr)\n ax2.plot(X,meansubTrueRV,'-',label = \"True Radial Velocity (mean-substracted)\")\n turerms1 = np.around(np.sqrt(((RVRmLowCorr-meansubTrueRV)**2).mean()),decimals=2)\n ax2.errorbar(X, RVRmLowCorr, yerr=error,color='black',capsize=3, fmt='o',label = \"Estimated Radial Velocity, RMSE:\"+str(turerms1))\n ax2.set_xlabel(\"Index of epochs\")\n ax2.set_ylabel(\"Radial Velocity/ m/s (mean-substracted)\")\n ax2.grid(True, color='gainsboro', linestyle='-', linewidth=0.5)\n ax2.set_title('RV estimation after removing suborders with low correlation')\n fig2.legend()\n \n # plot the histogram of variance using all suborders\n fig3, ax3 = plt.subplots(figsize=(11,7))\n ax3.hist(Varout)\n ax3.set_title('Histogram of Variance for Each RV Estimation')\n \n if saveImg == True:\n fig.savefig(pathbase / 'graph' / \"RVcomparison.png\")\n fig1.savefig(pathbase / 'graph' / \"RVcomparison1.png\")\n fig3.savefig(pathbase /'graph'/'hist.png')\n if ContaminationDet:\n fig2.savefig(pathbase / 'graph' / \"RVcomparison2.png\")\n if ErrorByN:\n fig4.savefig(pathbase / 'graph' / 
\"MAE_N.png\")\n","repo_name":"FitzWang/Marvel","sub_path":"agg_pairwise.py","file_name":"agg_pairwise.py","file_ext":"py","file_size_in_byte":13801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7075568872","text":"\"\"\"\nУ нас есть страница для отображения списка студентов, мы даже передаем их из вьюхи students_view, но на странице их не показываем.\nНадо бы это исправить.\n\nЗадания:\n 1. Найдите в проекте темплэйт students.html и сделайте так, чтобы на странице имя каждого студента выводилось с новой строки.\n Используйте тэмплэйт тэг for для этого и оборачивайте каждое имя в html тэг

\n 2. Откройте страницу http://127.0.0.1:8000/students/ и посмотрите на результат.\n\"\"\"\nfrom django.shortcuts import render\n\n\ndef students_view(request):\n students = [\n 'Иван',\n 'Мария',\n 'Петр',\n 'Алексей',\n ]\n\n return render(request, 'level_1/students.html', context={'students': students})\n","repo_name":"learnpythonru/django_templates_challenges","sub_path":"django_templates_homework/views/level_1/f_students.py","file_name":"f_students.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20262174831","text":"# https://leetcode.com/problems/longest-palindromic-substring/\n \n# Given a string s, return the longest palindromic substring in s.\n# A string is called a palindrome string if the reverse of that string is the same as the original string.\n\n# Input: s = \"babad\"\n# Output: \"bab\"\n# Explanation: \"aba\" is also a valid answer.\n\n# Input: s = \"cbbd\"\n# Output: \"bb\"\n\n\n# The following code works but it is not memory efficient, so let's try a second option\ndef longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n n = len(s)\n max_len = n\n k = n\n\n if len(s) == 1:\n return s\n\n while n!= 1:\n if k == n:\n window = s[0:n]\n if window == window[::-1]:\n return window\n else:\n k -= 1\n continue\n\n for i in range(k):\n window = s[0:k]\n if window == window[::-1]:\n return window\n\n for i in range(max_len-k):\n window = window[1:] + s[k+i]\n if window == window[::-1]:\n return window\n else:\n continue\n k -= 1\n n -= 1\n\n \n# Second approach, using dynamic programming\n# 1. Create a DP table with size of n x n, with n = length of the string\n# 2. Fill the diagonal with True\n# 3. Starting from the backward, iterate the outer loop backwards and iterate the inner loop forward\n# 4. Check characters at i and j position, if the 2 characters matches, 2 conditions need to be checked:\n# a. First, to see if j - i == 1\n# b. See if dp[i+1][j-1] == True\n\ndef longestPalindrome(s):\n longest_palin = ''\n # initialize matrix\n dp = [[0] * len(s) for _ in range(len(s))]\n for i in range(len(s)):\n dp[i][i] = True\n longest_palin = s[i]\n\n for i in range(len(s) - 1, -1, -1):\n for j in range(i + 1, len(s)):\n if s[i] == s[j]:\n if j - i == 1 or dp[i+1][j-1] == True:\n dp[i][j] = True\n if len(longest_palin) < len(s[i:j+1]):\n longest_palin = s[i:j+1]\n return longest_palin\n\n\ns = 'eabcb'\nprint(longestPalindrome(s))\n","repo_name":"huahuawong/daily-coding-problems","sub_path":"blind75/3. Longest-Palindromic-Substring.py","file_name":"3. Longest-Palindromic-Substring.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40268969228","text":"# Given an array nums of size n, return the majority element.\n# The majority element is the element that appears more than\n# ⌊n / 2⌋ times. 
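# --- Illustrative aside (standalone worked example): tracing the DP approach
# from the longest-palindromic-substring record above on its sample input
# "cbbd". dp[i][j] is True when s[i..j] is a palindrome; a cell turns True
# when the end characters match and the inner span is empty (j - i == 1) or
# already marked True.
s = "cbbd"
n = len(s)
dp = [[False] * n for _ in range(n)]
best = s[0]
for i in range(n - 1, -1, -1):
    dp[i][i] = True
    for j in range(i + 1, n):
        if s[i] == s[j] and (j - i == 1 or dp[i + 1][j - 1]):
            dp[i][j] = True
            if j - i + 1 > len(best):
                best = s[i:j + 1]
print(best)  # -> "bb", matching the expected output in the problem statement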
You may assume that the majority element\n# always exists in the array.\n\n# Beats 95% apparently\ndef majorityElement(nums):\n\t\tif nums==[]:\n\t\t\treturn 0\n\t\tn=set(nums)\n\t\tmaxm = 0\n\t\tfor x in n:\n\t\t\tt = nums.count(x)\n\t\t\tif t>maxm:\n\t\t\t\tmaxm=t\n\t\t\t\tm=x\n\t\treturn m\nprint(majorityElement([6,5,5]))\n\n# my Solution terible runtime beats 18%\n# def majorityElement(nums):\n# nums.sort()\n# if(len(nums) == 1):\n# return nums[0]\n# if(len(nums) % 2 == 0):\n# return nums[(len(nums) // 2) - 1]\n# else:\n# return nums[(len(nums) // 2)]\n# print(majorityElement([3,4,3]))\n\n# Boyer Moore Majority Voting\n# def majorityElement(nums):\n# candidate = nums[0]\n# vote = 0 \n# for i in nums:\n# if(i == candidate):\n# vote += 1 \n# else:\n# vote -= 1 \n# if(vote == 0):\n# candidate = i \n# vote = 1 \n# if(vote > len(nums)//2):\n# return candidate\n# return candidate\n# print(majorityElement([6,5,5]))\n\n\n\n","repo_name":"Sudhanva1999/PraticeProblems","sub_path":"LeetCode/Arrays/MajorityElement.py","file_name":"MajorityElement.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33675104129","text":"from odoo import api, fields, models\n\n\nclass MassMailingGroup(models.Model):\n _name = \"mail.mass_mailing.group\"\n _description = \"Mass Mailing Group\"\n\n name = fields.Char(\n compute=\"_compute_name\",\n store=True,\n )\n mailings_ids = fields.One2many(\n comodel_name=\"mailing.mailing\",\n inverse_name=\"group_id\",\n string=\"Mailings\",\n )\n distribution_list_id = fields.Many2one(\n comodel_name=\"distribution.list\",\n string=\"Distribution List\",\n )\n internal_instance_id = fields.Many2one(\n comodel_name=\"int.instance\",\n )\n total_sent = fields.Integer(\n compute=\"_compute_total_sent\",\n store=True,\n string=\"Total sent (%)\",\n )\n\n @api.depends(\"mailings_ids.group_id\", \"mailings_ids.contact_ab_pc\")\n def _compute_total_sent(self):\n for group in self:\n group.total_sent = sum(group.mailings_ids.mapped(\"contact_ab_pc\"))\n\n @api.depends(\n \"distribution_list_id\",\n )\n def _compute_name(self):\n for group in self:\n group.name = \"%s (#%s)\" % (group.distribution_list_id.name, group.id)\n","repo_name":"psychedel/mozaik","sub_path":"mozaik_communication/models/mail_mass_mailing_group.py","file_name":"mail_mass_mailing_group.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"33338714158","text":"# reads the measured data and passes them to cost_function to calculate proper metrics. 
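# --- Illustrative aside (made-up numbers, standalone sketch): the
# inverse-variance weighting used by the RV aggregation code earlier in this
# file set: weight each estimate by 1/variance, and the combined variance is
# 1 / (sum of weights). The metrics script above similarly averages per-file
# metrics, though its comment does not say whether the average is weighted.
import numpy as np

rv = np.array([10.2, 9.8, 10.5])  # individual estimates
var = np.array([0.4, 0.1, 0.9])   # their variances
w = (1 / var) / np.sum(1 / var)   # normalized inverse-variance weights
print(np.sum(rv * w), 1 / np.sum(1 / var))  # combined estimate and variance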
Metrics are averaged and saved to file for further use\n# assumes current working dir\n\nimport scipy.io as skipy\nimport numpy\nimport DyMat\n# its imported dynamically based on selected dir\n# import cost_function as cf\nimport sys\nimport os\nimport time\nimport importlib.util\nimport re\nfrom datetime import datetime\nimport fun_lib\nimport matplotlib.pyplot as plt\n\nfrom fun_lib import ObjectiveVar\n# from matplotlib import pyplot as plt\n# import re\n# import TerminalDS\n\n\n# CONTROL ROOM\nDATA_FOLDER = R\"..\\data\\Valsalva\"\nCOST_FUNCTION_FOLDER = R\"..\\Identification\\valsalva\"\nVALUE_LOG_DIRNAME = R'..\\data\\Valsalva\\ProcessLog'\nVALUE_LOG_FILENAME = '_current_costs.txt'\n\n# draws plots for each individual file\nDRAW_PLOTS = True\n# write the outputfiles with targetValues\nWRITE_FILE = True\nREAD_OBJECTIVES = True\nUSE_WEIGHING = True\n\n# file_set = 'Sitting V_OO'\n# files = [\"V_00_sit_01\", \"V_00_sit_02\", \"V_00_sit_03\", \"V_00_sit_04\"]\n# file_set = 'All sitting'\n# files = [\"V_00_sit_01\", \"V_00_sit_02\", \"V_00_sit_03\", \"V_00_sit_04\", \"V_01_sit_01\", \"V_01_sit_02\", \"V_01_sit_03\", \"V_02_sit_01\", \"V_02_sit_02\", \"V_03_sit_01\", \"V_03_sit_02\"]\nfile_set = 'All supine'\nfiles = [\"V_00_sup_01\", \"VEc_01_sup_01\", \"VEc_01_sup_02\", \"VEc_01_sup_03\", \"VEc_02_sup_01\", \"VEc_02_sup_02\", \"VEc_03_sup_01\"]\n\n# '] \r\n\r\ndef normalize(text):\r\n text = re.sub(r'[^\\u0621-\\u06CC\\s]+', '', text)\r\n words = text.split(\" \")\r\n words_without_stopwords = [word for word in words if not word in symboles]\r\n return \" \".join(words_without_stopwords)\r\n\r\n\r\ndef prediction(clf,sentece):\r\n sentence=normalize(sentece)\r\n sentence = clf[\"tfidf\"].transform([sentece])\r\n formality = clf['clf'].predict(sentence)[0]\r\n print(formality)\r\n\r\nwith open('logreg_trained_model12.pickle', 'rb') as file:\r\n model = pickle.load(file)\r\nnew_sentence=input()\r\nprediction(model,new_sentence)","repo_name":"mnsalimi/Informal2FormalText","sub_path":"text_classification/logesticRegration/use_saved_model.py","file_name":"use_saved_model.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12479703645","text":"import ast\n\nfrom .inference import NIM_TYPE_MAP, NIM_WIDTH_RANK\n\nfrom py2many.clike import CLikeTranspiler as CommonCLikeTranspiler\n\n\n# allowed as names in Python but treated as keywords in Nim\nnim_keywords = frozenset(\n [\n \"addr\",\n \"and\",\n \"as\",\n \"asm\",\n \"bind\",\n \"block\",\n \"break\",\n \"case\",\n \"cast\",\n \"concept\",\n \"const\",\n \"continue\",\n \"converter\",\n \"defer\",\n \"discard\",\n \"distinct\",\n \"div\",\n \"do\",\n \"elif\",\n \"else\",\n \"end\",\n \"enum\",\n \"except\",\n \"export\",\n \"finally\",\n \"for\",\n \"from\",\n \"func\",\n \"if\",\n \"import\",\n \"in\",\n \"include\",\n \"interface\",\n \"is\",\n \"isnot\",\n \"iterator\",\n \"let\",\n \"macro\",\n \"method\",\n \"mixin\",\n \"mod\",\n \"nil\",\n \"not\",\n \"notin\",\n \"object\",\n \"of\",\n \"or\",\n \"out\",\n \"proc\",\n \"ptr\",\n \"raise\",\n \"ref\",\n \"return\",\n \"shl\",\n \"shr\",\n \"static\",\n \"template\",\n \"try\",\n \"tuple\",\n \"type\",\n \"using\",\n \"var\",\n \"when\",\n \"while\",\n \"xor\",\n \"yield\",\n ]\n)\n\nnim_symbols = {\n ast.Eq: \"==\",\n ast.Is: \"==\",\n ast.NotEq: \"!=\",\n ast.Pass: \"discard\",\n ast.Mult: \"*\",\n ast.Add: \"+\",\n ast.Sub: \"-\",\n ast.Div: \"/\",\n ast.FloorDiv: \"/\",\n ast.Mod: 
\"%\",\n ast.Lt: \"<\",\n ast.Gt: \">\",\n ast.GtE: \">=\",\n ast.LtE: \"<=\",\n ast.LShift: \"<<\",\n ast.RShift: \">>\",\n ast.BitXor: \"xor\",\n ast.BitOr: \"or\",\n ast.BitAnd: \"and\",\n ast.Not: \"not \",\n ast.IsNot: \"!=\",\n ast.USub: \"-\",\n ast.And: \" and \",\n ast.Or: \" or \",\n ast.In: \"in\",\n}\n\n\ndef nim_symbol(node):\n \"\"\"Find the equivalent C symbol for a Python ast symbol node\"\"\"\n symbol_type = type(node)\n return nim_symbols[symbol_type]\n\n\nclass CLikeTranspiler(CommonCLikeTranspiler):\n def __init__(self):\n super().__init__()\n self._type_map = NIM_TYPE_MAP\n self._statement_separator = \"\"\n\n def visit(self, node) -> str:\n if type(node) in nim_symbols:\n return nim_symbol(node)\n else:\n return super().visit(node)\n\n def visit_Ellipsis(self, node) -> str:\n return \"discard\"\n\n def visit_BinOp(self, node) -> str:\n if isinstance(node.op, ast.Pow):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return f\"{left}^{right}\"\n\n left = self.visit(node.left)\n op = self.visit(node.op)\n right = self.visit(node.right)\n\n left_type = self._typename_from_annotation(node.left)\n right_type = self._typename_from_annotation(node.right)\n\n left_rank = NIM_WIDTH_RANK.get(left_type, -1)\n right_rank = NIM_WIDTH_RANK.get(right_type, -1)\n\n if left_rank > right_rank:\n right = f\"{left_type}({right})\"\n elif right_rank > left_rank:\n left = f\"{right_type}({left})\"\n\n return f\"({left} {op} {right})\"\n\n def visit_Name(self, node) -> str:\n if node.id in nim_keywords:\n return node.id + \"_\"\n if node.id.startswith(\"_\"):\n return \"_\"\n return super().visit_Name(node)\n\n def visit_In(self, node) -> str:\n left = self.visit(node.left)\n right = self.visit(node.comparators[0])\n left_type = self._typename_from_annotation(node.left)\n if left_type == \"string\":\n self._usings.add(\"strutils\")\n return f\"{left} in {right}\"\n","repo_name":"py2many/py2many","sub_path":"pynim/clike.py","file_name":"clike.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":559,"dataset":"github-code","pt":"16"} +{"seq_id":"23697633014","text":"################################################# Packages/Libraries ##########################################################\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\n############################################ Variables and Functions ##########################################################\n\n\nwarehouseWidth = 800\nwarehouseHeight = 650\nshelfWidth = 50\nlaneWidth = 50\nnbrOfAGVs = 6 # Number of vehicles, originally it was set to 6\nspeed = 10\nAGVRadius = 12.5\nchargingRate = 0.05\nconsumingRate = 0.005\nthresholdPower = 3.5\nfullyCharged = 10\ntaskFactor = 0.001 # Probability to create a task for each shelf\nnNodesx = 6\nnNodesy = 3\nunloadingTime = 20\nloadingTime = 10\nremovalTime = 50\n\n\nclass AGV(object):\n\n def __init__(self, pos, fullyCharged):\n self.position = pos\n self.direction = 0\n self.status = 'free' # free, charging, occupied, loading, unloading\n self.power = fullyCharged\n self.clock = 0\n self.parkdir = 0 # to be able to save direction before loading\n self.hasTask = False\n\n\nclass Shelf(object):\n\n def __init__(self, pos, priority):\n self.position = pos\n self.status = 'no task'\n self.priority = priority\n\n\ndef update_AGV_direction(a, nodes):\n chargeBias = 1\n if a.power <= thresholdPower:\n chargeBias = 0.25\n\n nodeNbr = nodes.index(a.position)\n r = np.random.rand()\n\n if nodeNbr in [0, 
2, 4]:\n        a.direction = 0\n    if nodeNbr in [5, 11]:\n        a.direction = -np.pi / 2\n    if nodeNbr in [1, 3, 7, 9]:\n        if r < 1 / 2:\n            a.direction = 0\n        else:\n            a.direction = -np.pi / 2\n    if nodeNbr in [6, 8, 10]:\n        if r < chargeBias * 1 / 2:\n            a.direction = 0\n        else:\n            a.direction = np.pi / 2\n    if nodeNbr in [14, 16]:\n        if r < chargeBias * 1 / 2:\n            a.direction = np.pi\n        else:\n            a.direction = np.pi / 2\n    if nodeNbr in [13, 15, 17]:\n        a.direction = np.pi\n    if nodeNbr == 12:\n        a.direction = np.pi / 2\n    return a\n\n\ndef update_AGV_position(a):\n    pos = list(a.position)\n    pos[0] = pos[0] + speed * np.cos(a.direction)\n    pos[1] = pos[1] + speed * np.sin(a.direction)\n    a.position = tuple(pos)\n    return a\n\n\ndef update_AGV_power(AGVs):\n    for a in AGVs:\n        if a.status == 'charging':\n            a.power = a.power + chargingRate\n        else:\n            if a.power > consumingRate:\n                a.power = a.power - consumingRate\n            else:\n                if a.status == 'out of battery':\n                    if a.clock >= removalTime:\n                        nodePos = list(nodes[0])\n                        nodePos[1] = nodePos[1] + laneWidth\n                        a.position = tuple(nodePos)\n                        a.clock = 0\n                        a.status = 'charging'\n                    else:\n                        a.clock = a.clock + 1\n                else:\n                    a.power = 0\n                    a.clock = 0\n                    a.status = 'out of battery'\n\n\ndef check_for_shelf(a, shelfs, shelfPositions):\n    pos = list(a.position)\n    check1 = tuple([pos[0] + laneWidth, pos[1]])\n    check2 = tuple([pos[0] - laneWidth, pos[1]])\n    is_shelf = False\n    if check1 in shelfPositions:\n        shelfNbr = shelfPositions.index(check1)\n        if shelfs[shelfNbr].status == 'task':\n            a.position = check1\n            a.status = 'loading'\n            shelfs[shelfNbr].status = 'no task'\n            a.parkdir = np.pi\n            is_shelf = True\n    if check2 in shelfPositions and is_shelf == False:\n        shelfNbr = shelfPositions.index(check2)\n        if shelfs[shelfNbr].status == 'task':\n            a.position = check2\n            a.status = 'loading'\n            shelfs[shelfNbr].status = 'no task'\n            a.parkdir = 0\n            is_shelf = True\n    return [a, is_shelf]\n\n\ndef move_AGV(AGV, nodes, shelfs, shelfPositions):\n    global completed_tasks\n    for a in AGV:\n        # if charging, loading, unloading:\n        if a.status == 'loading':\n            if a.clock == loadingTime:\n                pos = list(a.position)\n                pos[0] = pos[0] + laneWidth * np.cos(a.parkdir)\n                a.position = tuple(pos)\n                a.clock = 0\n                a.status = 'occupied'\n            else:\n                a.clock = a.clock + 1\n        elif a.status == 'unloading':\n            if (a.clock == unloadingTime):\n                pos = list(a.position)\n                a.direction = np.pi / 2\n                pos[1] = pos[1] + laneWidth * np.sin(a.direction)\n                a.position = tuple(pos)\n                a.clock = 0\n                a.status = 'free'\n                completed_tasks = completed_tasks + 1\n            if (a.clock < unloadingTime):\n                pos = list(a.position)\n                pos[1] = 0.5 * laneWidth\n                a.position = tuple(pos)\n                a.clock += 1\n        if a.power >= consumingRate:\n            if a.status == 'loading':\n                if a.clock == loadingTime:\n                    pos = list(a.position)\n                    pos[0] = pos[0] + laneWidth * np.cos(a.parkdir)\n                    a.position = tuple(pos)\n                    a.clock = 0\n                    a.status = 'occupied'\n                    a.hasTask = True\n                else:\n                    a.clock = a.clock + 1\n            elif a.status == 'unloading':\n                if a.clock == unloadingTime:\n                    pos = list(a.position)\n                    pos[1] = pos[1] + laneWidth * np.sin(a.parkdir)\n                    a.position = tuple(pos)\n                    a.clock = 0\n                    a.status = 'free'\n                    a.hasTask = False\n                else:\n                    a.clock = a.clock + 1\n            elif a.status == 'charging':\n                if a.power >= fullyCharged:\n                    a.power = fullyCharged\n                    pos = list(a.position)\n                    pos[1] = pos[1] - laneWidth\n                    a.position = tuple(pos)\n                    if a.hasTask == True:\n                        a.status = 'occupied'\n                    else:\n                        a.status = 'free'\n\n            elif a.position in nodes:\n                nodeNbr = nodes.index(a.position)\n                if nodeNbr in [0, 1, 2, 3, 4, 5] and a.power <= thresholdPower:\n                    pos = 
list(a.position)\n pos[1] = pos[1] + laneWidth\n a.position = tuple(pos)\n a.direction = - np.pi / 2\n a.status = 'charging'\n else:\n a = update_AGV_direction(a, nodes)\n a = update_AGV_position(a)\n elif a.status == 'free':\n tmp = check_for_shelf(a, shelfs, shelfPositions)\n if tmp[1] == False:\n a = update_AGV_position(a)\n else:\n a = update_AGV_position(a)\n return AGV\n\n\ndef unload_AGVs(AGV): # A function to unload the vehicles\n for a in AGV:\n pos = np.array(a.position)\n y = pos[1]\n if (a.status == 'occupied') and (y == 1.5 * laneWidth):\n a.status = 'unloading'\n\n\ndef plot_AGVs(AGV):\n for a in AGV:\n pos = np.array(a.position)\n x = pos[0]\n y = pos[1]\n if a.status == 'free':\n c = plt.Circle((x, y), AGVRadius, edgecolor='k', facecolor='blue', zorder=1)\n elif a.status == 'occupied':\n c = plt.Circle((x, y), AGVRadius, edgecolor='k', facecolor='red', zorder=1)\n else:\n c = plt.Circle((x, y), AGVRadius, edgecolor='k', facecolor='green')\n\n fig = plt.gcf()\n ax = fig.gca()\n ax.add_artist(c)\n plt.axis([0, warehouseWidth, 0, warehouseHeight])\n # plt.pause(1)\n\n\ndef map_shelfs(shelf_matrix):\n global shelfPositions\n global shelfPriority\n for (i, j), value in np.ndenumerate(shelf_matrix):\n if (shelf_matrix[i][j] != 0):\n pos = (50 * j + laneWidth / 2, 50 * i + laneWidth / 2)\n shelfPositions.append(pos)\n shelfPriority.append(shelf_matrix[i][j])\n # return shelfPositions\n\n\ndef plot_shelfs(shelfs): # A function to plot the shelfs\n for s in shelfs:\n pos = list(s.position)\n if (s.status == 'no task'):\n plt.plot(pos[0], pos[1], 'ks', markersize=20) # Blue for those without a task...\n if (s.status == 'task'):\n plt.plot(pos[0], pos[1], 'rs', markersize=20) # Red if it has a task!\n\n\ndef create_task(shelfs, taskFactor): # A function to create tasks for each shelf\n for s in shelfs:\n chance = random.uniform(0, 1)\n scaledTaskFactor = taskFactor / s.priority\n if (scaledTaskFactor > chance) and (s.status == 'no task'): # Create task only if the shelf has no task\n s.status = 'task'\n return shelfs\n\n\ndef run_test(shelf_test_matrix, task_goal):\n AGVs = []\n shelfs = []\n map_shelfs(shelf_test_matrix)\n\n\n # Initialize AGVs\n startPosx = np.linspace(0 + laneWidth/2, warehouseWidth - laneWidth/2, nbrOfAGVs)\n for i in range(len(startPosx)):\n pos = (startPosx[i], warehouseHeight - 75)\n a = AGV(pos, fullyCharged)\n AGVs.append(a)\n\n\n #Update the list 'shelfs' to contain each shelf here:\n for i in range(len(shelfPositions)):\n pos = shelfPositions[i]\n prio = shelfPriority[i]\n s = Shelf(pos, prio)\n shelfs.append(s)\n\n\n # Create Nodes\n nodes = []\n for n in range(nNodesy):\n xPos = np.linspace(laneWidth/2, warehouseWidth - laneWidth/2, nNodesx)\n for i in range(len(xPos)):\n pos = (np.int(xPos[i]), warehouseHeight - 75 - np.int(n * (warehouseHeight-150)/(nNodesy-1)))\n nodes.append(pos)\n\n\n\n time = 0\n while completed_tasks < task_goal:\n time = time +1\n plt.figure(1)\n plt.clf()\n TitleString = 'Time: ' + str(time) + ', Completed Tasks: ' + str(completed_tasks)\n plt.title(TitleString)\n plot_shelfs(shelfs)\n plot_AGVs(AGVs)\n plt.pause(0.0005)\n move_AGV(AGVs, nodes, shelfs, shelfPositions)\n update_AGV_power(AGVs)\n # Time to give some tasks to each shelf!\n shelfs = create_task(shelfs, taskFactor)\n unload_AGVs(AGVs)\n return time\n\n################################################## Driver Code ######################################################\n\n\nshelf_test_matrix = np.array([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n 
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,1,1,0,1,1,0,1,1,0,1,1,0,1,1,0],\n [0,1,1,0,1,1,0,1,1,0,1,1,0,1,1,0],\n [0,1,1,0,1,1,0,1,1,0,1,1,0,1,1,0],\n [0,1,1,0,1,1,0,1,1,0,1,1,0,1,1,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,3,3,0,3,3,0,1,1,0,1,1,0,1,1,0],\n [0,3,3,0,3,3,0,1,1,0,1,1,0,1,1,0],\n [0,3,3,0,3,3,0,1,1,0,1,1,0,1,1,0],\n [0,3,3,0,3,3,0,1,1,0,1,1,0,1,1,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])\n\n#task_goal_list = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]\ntask_goal_list = [50]\nNbrofTests = 5\navg_list = []\nfor task_goal in task_goal_list:\n print('Goal: ', task_goal)\n timeList = []\n for i in range(NbrofTests):\n print(i+1)\n completed_tasks = 0\n shelfPositions = []\n shelfPriority = []\n timeList.append(run_test(shelf_test_matrix,task_goal))\n\n\n timeList = np.array(timeList)\n print('Times: ', timeList)\n avgTime = np.sum(timeList)/NbrofTests\n print('Avg Time: ',avgTime)\n avg_list.append(avgTime)\n\n\nplt.figure(3)\nplt.plot(task_goal_list, avg_list, 'k.')\nplt.show()\n","repo_name":"matildawikstrom/Warehouse-Project","sub_path":"run_tests_code.py","file_name":"run_tests_code.py","file_ext":"py","file_size_in_byte":11953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10227221387","text":"import random\r\nimport os\r\nimport arcade\r\n\r\nfrom Background import Background\r\nfrom BuildingTools import BuildingTools\r\n# from GameOver import GameOverView\r\nfrom Player import Player\r\nfrom Wall import Wall\r\nfrom Companion import Companion\r\nfrom enemy import Enemy\r\nimport enemy as en\r\nfrom Radio import Radio\r\nfrom MapGeneration import generateMap\r\nfrom Spikes import Spikes\r\nfrom Tree import Tree\r\nfrom Temple import Temple\r\n\r\nSCREEN_TITLE = \"Gummy terror\"\r\n\r\nSCREEN_WIDTH = 800\r\nSCREEN_HEIGHT = 600\r\n\r\nTILE_SIZE = 64\r\nMAP_TILE_WIDTH = 100\r\nMAP_TILE_HEIGHT = 100\r\nTOTAL_WIDTH = TILE_SIZE * MAP_TILE_WIDTH\r\nTOTAL_HEIGHT = TILE_SIZE * MAP_TILE_HEIGHT\r\n\r\nHORIZONTAL_TILES_COUNT = SCREEN_WIDTH // TILE_SIZE\r\nVERTICAL_TILES_COUNT = SCREEN_HEIGHT // TILE_SIZE # up down\r\n\r\nPLAYER_SPEED = 6\r\nPLAYER_SCALING = 1\r\n\r\nROCK_SCALING = 0.1\r\nENEMY_SCALING = 1\r\nDIFFICULTY = 5\r\nSPRITE_IMAGE_SIZE = 128\r\nSPRITE_SCALING = 0.25\r\nSPRITE_SIZE = int(SPRITE_IMAGE_SIZE * SPRITE_SCALING)\r\n\r\n\r\nwindow = None\r\n\r\nclass TitleAnime(arcade.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.center_x = SCREEN_WIDTH / 2\r\n self.center_y = SCREEN_HEIGHT / 2\r\n self.sheet = arcade.load_spritesheet(f\"images/title.png\", 800, 600, 6, 6, 0)\r\n self.cur_texture = 0\r\n\r\n def update_animation(self, delta_time: float = 1 / 60):\r\n self.cur_texture += 1\r\n if self.cur_texture > 6 * 4 - 1:\r\n self.cur_texture = 0\r\n frame = self.cur_texture // 4\r\n self.texture = self.sheet[frame]\r\n\r\n\r\n\r\nclass TitleView(arcade.View):\r\n def __init__(self):\r\n super().__init__()\r\n self.width = SCREEN_WIDTH\r\n self.height = SCREEN_HEIGHT\r\n self.title = TitleAnime()\r\n\r\n def setup(self):\r\n self.clear()\r\n\r\n def on_show(self):\r\n self.clear()\r\n\r\n def on_draw(self):\r\n self.clear()\r\n self.title.draw()\r\n\r\n def on_update(self, delta_time):\r\n self.title.update_animation()\r\n\r\n def on_mouse_press(self, x, y, button, modifiers):\r\n game_view = GameView()\r\n game_view.setup()\r\n self.window.show_view(game_view)\r\n\r\nclass GameView(arcade.View):\r\n \"\"\" Main Window \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\" 
Create the variables \"\"\"\r\n # Init the parent class\r\n super().__init__()\r\n arcade.set_background_color(arcade.csscolor.DARK_RED)\r\n self.scene = None\r\n self.player_sprite = None\r\n self.numOfEnemies = 10\r\n self.rounds = 0\r\n # building shit\r\n self.buildingSquare_sprite = None\r\n self.buildThorns = False\r\n\r\n self.physics_engine = None\r\n self.tree_engine = None\r\n self.enemies_engine = None\r\n\r\n self.mouse_position_x = 0\r\n self.mouse_position_y = 0\r\n\r\n self.on_screen_pointer_x = 0\r\n self.on_screen_pointer_y = 0\r\n self.left_pressed = False\r\n self.right_pressed = False\r\n self.up_pressed = False\r\n self.down_pressed = False\r\n self.c_pressed = False\r\n self.b_pressed = False\r\n self.k_pressed = False\r\n self.l_pressed = False\r\n\r\n self.music = None\r\n\r\n self.templeLevel = 0\r\n self.sounds = {}\r\n self.user_volume = 0.8\r\n self.music_volume = 0.4\r\n\r\n # Don't show the mouse cursor\r\n self.window.set_mouse_visible(False)\r\n self.gui_camera = None\r\n\r\n self.graphicsTextures = {}\r\n self.enemy_texture_list = en.loadTextures()\r\n self.mapList = None\r\n\r\n # Trees\r\n self.tree_sprite_list = arcade.SpriteList()\r\n # self.flower_sprite_list = arcade.SpriteList()\r\n\r\n self.treeToDraw = None\r\n\r\n def setup(self):\r\n \"\"\" Set up everything with the game \"\"\"\r\n self.scene = arcade.Scene()\r\n self.templeLevel = 0\r\n self.createBackground()\r\n\r\n self.scene.add_sprite_list(\"Player\")\r\n # self.music = arcade.load_sound(\"sounds/dark.webm\")\r\n # self.music.play()\r\n\r\n self.player_sprite = Player()\r\n self.player_sprite.position = [SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2]\r\n self.scene.add_sprite(\"Player\", self.player_sprite)\r\n\r\n self.scene.add_sprite_list(\"Enemies\")\r\n self.scene.add_sprite_list(\"Companions\")\r\n self.scene.add_sprite_list(\"Effects\")\r\n self.scene.add_sprite_list(\"Projectiles\")\r\n self.scene.add_sprite_list(\"Walls\")\r\n self.scene.add_sprite_list(\"Non-walkable things\")\r\n self.scene.add_sprite_list(\"Monument\")\r\n self.scene.add_sprite_list(\"Background\")\r\n self.scene.add_sprite_list(\"Building tools\")\r\n self.scene.add_sprite_list(\"Flowers\")\r\n self.scene.add_sprite_list(\"Temple\")\r\n\r\n self.vse = arcade.SpriteList()\r\n self.spiky = arcade.SpriteList()\r\n\r\n self.sounds[\"swordAttack\"] = arcade.load_sound(\"sounds/sword_strike2.wav\")\r\n self.sounds[\"hit\"] = arcade.load_sound(\":resources:sounds/hurt2.wav\")\r\n self.sounds[\"wall\"] = arcade.load_sound(\"sounds/wall-crash.wav\")\r\n self.sounds[\"thorns\"] = arcade.load_sound(\"sounds/thorns.wav\")\r\n self.sounds[\"clearGround\"] = arcade.load_sound(\":resources:sounds/upgrade1.wav\")\r\n\r\n \r\n self.radio = Radio()\r\n self.radio.position = [SCREEN_WIDTH - 118, 134]\r\n self.scene.add_sprite(\"Effects\", self.radio)\r\n self.sounds[\"sabaton\"] = arcade.load_sound(\"sounds/8bit.mp3\")\r\n self.sounds[\"sabaton\"].play(self.music_volume, loop=True)\r\n\r\n \"\"\"for i in range(6):\r\n rock_sprite = Wall(\"images/rock.png\", ROCK_SCALING)\r\n rock_sprite.position = [random.randint(0, SCREEN_WIDTH), random.randint(0, SCREEN_HEIGHT)]\r\n self.scene.add_sprite(\"Walls\", rock_sprite)\"\"\"\r\n\r\n self.createWalls(100)\r\n\r\n \"\"\"enemy_sprite = arcade.Sprite(\"images/ememy.png\", ENEMY_SCALING)\r\n enemy_sprite.position = [random.randint(0, SCREEN_WIDTH), random.randint(0, SCREEN_HEIGHT)]\r\n self.scene.add_sprite(\"Enemies\", enemy_sprite)\"\"\"\r\n\r\n self.createEnemies()\r\n 
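# --- Illustrative aside (standalone arithmetic check): the frame pacing used
# by the animated title sprites in this file. A counter advances once per
# update and is integer-divided by 4, so a 6-frame sheet wraps after 6 * 4
# ticks and plays at a quarter of the update rate (60 updates/s -> 15 fps).
FRAMES, TICKS_PER_FRAME = 6, 4
for tick in range(FRAMES * TICKS_PER_FRAME):
    frame = tick // TICKS_PER_FRAME
    assert 0 <= frame < FRAMES  # the last tick shows frame 5, then wraps to 0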
self.createBorder()\r\n\r\n self.physics_engine = arcade.PhysicsEngineSimple(\r\n self.player_sprite, self.scene.get_sprite_list(\"Walls\")\r\n )\r\n self.tree_engine = arcade.PhysicsEngineSimple(\r\n self.player_sprite, self.tree_sprite_list\r\n )\r\n\r\n self.textures_sheet = arcade.load_spritesheet(f\"images/player.png\", 128, 64, 12, 60, 0)\r\n self.gui_camera = arcade.Camera(self.window.width, self.window.height)\r\n\r\n def createBackground(self):\r\n imagePath = \"images/tiles/cobble/\"\r\n\r\n self.mapList = generateMap(MAP_TILE_WIDTH, MAP_TILE_HEIGHT)\r\n for row in range(len(self.mapList)):\r\n for column in range(len(self.mapList[row])):\r\n picName = self.mapList[row][column]\r\n if picName == \"GGGG\":\r\n if random.randint(0, 100) > 90:\r\n picName = \"GGGG2\"\r\n if random.randint(0, 1000) > 971:\r\n self.plantTrees(-TOTAL_WIDTH // 2 + column * TILE_SIZE,\r\n -TOTAL_HEIGHT // 2 + row * TILE_SIZE + 192)\r\n elif random.randint(0, 1000) > 901:\r\n self.plantFlowers(-TOTAL_WIDTH // 2 + column * TILE_SIZE, -TOTAL_HEIGHT // 2 + row * TILE_SIZE)\r\n\r\n elif picName == \"GRGR\" and random.randint(0, 100) > 50:\r\n picName = \"GRGR2\"\r\n\r\n elif picName == \"RGRG\" and random.randint(0, 100) > 50:\r\n picName = \"RGRG2\"\r\n\r\n elif picName == \"RRRR\" and random.randint(0, 100) > 75:\r\n picName = \"RRRR2\"\r\n\r\n elif picName == \"RRRR\" and random.randint(0, 100) > 66:\r\n picName = \"RRRR3\"\r\n\r\n if picName == \"GGGG2\":\r\n ANGLE = random.choice([0, 90, 180, 270])\r\n\r\n elif picName == \"RRRR2\":\r\n ANGLE = random.choice([0, 180])\r\n\r\n elif picName == \"RRRR3\":\r\n ANGLE = random.choice([0, 180])\r\n\r\n else:\r\n ANGLE = 0\r\n\r\n finalPath = imagePath + picName + \".png\"\r\n background_sprite = Background(finalPath, 1, TILE_SIZE, ANGLE)\r\n\r\n background_sprite.position = [-TOTAL_WIDTH // 2 + column * TILE_SIZE,\r\n -TOTAL_HEIGHT // 2 + row * TILE_SIZE]\r\n # background_sprite.position = [-TOTAL_WIDTH//2 + column * TILE_SIZE + column, -TOTAL_HEIGHT // 2 + row * TILE_SIZE + row]\r\n self.scene.add_sprite(\"Background\", background_sprite)\r\n \r\n def createBorder(self):\r\n for wideIndex in range(-10, MAP_TILE_WIDTH + 10 + 2):\r\n for highIndex in range(-10, MAP_TILE_HEIGHT + 10 + 2):\r\n if wideIndex <= 0 or highIndex <= 0 or wideIndex >= MAP_TILE_WIDTH + 1 or highIndex >= MAP_TILE_HEIGHT + 1:\r\n\r\n border_sprite = Wall(\"images/rocks/better_wall.png\", 1, TILE_SIZE, 999999999, 1, 0)\r\n border_sprite.position = [-TOTAL_WIDTH // 2 - TILE_SIZE + TILE_SIZE * wideIndex,\r\n -TOTAL_HEIGHT // 2 - TILE_SIZE + TILE_SIZE * highIndex]\r\n\r\n self.scene.add_sprite(\"Walls\", border_sprite)\r\n self.vse.append(border_sprite)\r\n\r\n def createWalls(self, WALL_COUNT_INITIAL):\r\n image_list = [\"images/rocks/2.png\", \"images/rocks/3.png\"]\r\n for i in range(WALL_COUNT_INITIAL):\r\n image_no = random.randint(0, len(image_list) - 1)\r\n # size =\r\n rock_sprite = Wall(image_list[image_no], 1, TILE_SIZE, 1000, 0, 0)\r\n\r\n rock_sprite.position = [random.randint(-TOTAL_WIDTH // 2, TOTAL_WIDTH // 2),\r\n random.randint(-TOTAL_HEIGHT // 2, TOTAL_HEIGHT // 2)]\r\n\r\n self.scene.add_sprite(\"Walls\", rock_sprite)\r\n self.vse.append(rock_sprite)\r\n\r\n def buildWall(self):\r\n for dictionary in [\"Player\", \"Enemies\", \"Companions\", \"Walls\", \"Non-walkable things\", \"Monument\"]:\r\n if len(arcade.check_for_collision_with_list(self.buildingSquare_sprite, self.scene[dictionary])) > 0:\r\n break\r\n else:\r\n build = False\r\n if self.buildThorns and 
self.player_sprite.coins >= 30:\r\n self.player_sprite.coins -= 30\r\n rock_sprite2 = Spikes(\"images/rocks/spikes.png\", 1, TILE_SIZE, 40, 1, 30)\r\n rock_sprite = Wall(\"images/rocks/spikes.png\", 1, TILE_SIZE, 40, 1, 30)\r\n rock_sprite2.position = [self.on_screen_pointer_x, self.on_screen_pointer_y]\r\n self.spiky.append(rock_sprite2)\r\n build = True\r\n else:\r\n if self.buildThorns == False:\r\n if self.player_sprite.coins >= 15:\r\n self.player_sprite.coins -= 15\r\n rock_sprite = Wall(\"./images/rocks/better_wall.png\", 1, TILE_SIZE, 100, 0, 20)\r\n rock_sprite2 = Spikes(\"./images/rocks/better_wall.png\", 1, TILE_SIZE, 100, 0, 20)\r\n rock_sprite2.position = [self.on_screen_pointer_x, self.on_screen_pointer_y]\r\n self.spiky.append(rock_sprite2)\r\n build = True\r\n if build:\r\n rock_sprite.position = [self.on_screen_pointer_x, self.on_screen_pointer_y]\r\n self.scene.add_sprite(\"Walls\", rock_sprite)\r\n self.vse.append(rock_sprite)\r\n\r\n def plantFlowers(self, x, y):\r\n path = \"images/grass/\"\r\n listOfFlower = os.listdir(path)\r\n flower_sprite = Tree(path + random.choice(listOfFlower), 1, 150)\r\n flower_sprite.position = [x, y]\r\n # self.flower_sprite_list.append(flower_sprite)\r\n self.scene.add_sprite(\"Flowers\", flower_sprite)\r\n\r\n def plantTrees(self, x, y):\r\n path = \"images/trees/\"\r\n listOfTrees = os.listdir(path)\r\n\r\n tree_sprite = Tree(path + random.choice(listOfTrees), 1, 30)\r\n\r\n \"\"\"hit_boxes = [\r\n [tree_sprite.center_x - 10, tree_sprite.center_y],\r\n [tree_sprite.center_x + 10, tree_sprite.center_y],\r\n [tree_sprite.center_x - 10, tree_sprite.center_y - 10],\r\n [tree_sprite.center_x + 10, tree_sprite.center_y - 10]\r\n ]\r\n tree_sprite.set_hit_box(hit_boxes)\"\"\"\r\n # tree_sprite.phys = arcade.PhysicsEngineSimple(tree_sprite, self.player_sprite)\r\n tree_sprite.position = [x, y]\r\n\r\n # self.scene.add_sprite(\"Trees\", tree_sprite)\r\n self.tree_sprite_list.append(tree_sprite)\r\n \"\"\"for i in self.scene[\"Trees\"]:\r\n print(i)\"\"\"\r\n\r\n def createBuildTool(self):\r\n self.buildingSquare_sprite = BuildingTools(\"images/build tools/rect.png\", 1, TILE_SIZE)\r\n self.buildingSquare_sprite.position = [self.player_sprite.center_x + self.mouse_position_x - SCREEN_WIDTH // 2,\r\n self.player_sprite.center_y + self.mouse_position_y - SCREEN_HEIGHT // 2]\r\n self.scene.add_sprite(\"Building tools\", self.buildingSquare_sprite)\r\n\r\n def createEnemies(self):\r\n self.rounds += 1\r\n ENEMY_COUNT_INITIAL = self.numOfEnemies + (self.rounds // 3)\r\n\r\n for i in range(ENEMY_COUNT_INITIAL):\r\n colour_scheme = random.choice(self.enemy_texture_list)\r\n enemy_sprite = Enemy(colour_scheme)\r\n enemy_sprite.timer_rand = 0\r\n enemy_sprite.timer_smart = 0\r\n enemy_sprite.life += (5 * self.rounds)\r\n enemy_sprite.value += (3 * self.rounds)\r\n enemy_sprite.position = [random.randint(0, SCREEN_WIDTH), random.randint(0, SCREEN_HEIGHT)]\r\n enemy_sprite.phys = arcade.PhysicsEngineSimple(enemy_sprite, self.vse)\r\n\r\n if self.player_sprite.center_x > enemy_sprite.center_x:\r\n enemy_sprite.change_x = random.randint(1, 3)\r\n else:\r\n enemy_sprite.change_x -= random.randint(1, 3)\r\n if self.player_sprite.center_y > enemy_sprite.center_y:\r\n enemy_sprite.change_y = random.randint(1, 3)\r\n else:\r\n enemy_sprite.change_y -= random.randint(1, 3)\r\n \"\"\"\r\n enemy_sprite.enemies_engine_walls = arcade.PhysicsEngineSimple(\r\n enemy_sprite, self.scene.get_sprite_list(\"Walls\")\r\n )\r\n\r\n enemy_sprite.enemies_engine_trees = 
arcade.PhysicsEngineSimple(\r\n enemy_sprite, self.tree_sprite_list\r\n )\r\n \"\"\"\r\n self.scene.add_sprite(\"Enemies\", enemy_sprite)\r\n self.vse.append(enemy_sprite)\r\n\r\n def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):\r\n self.mouse_position_x = x\r\n self.mouse_position_y = y\r\n\r\n \"\"\"if self.b_pressed:\r\n self.buildingSquare_sprite.center_x = self.player_sprite.center_x + self.mouse_position_x - SCREEN_WIDTH//2\r\n self.buildingSquare_sprite.center_y = self.player_sprite.center_y + self.mouse_position_y - SCREEN_HEIGHT//2\"\"\"\r\n\r\n def on_mouse_press(self, x, y, key, modifiers):\r\n if self.b_pressed:\r\n self.buildWall()\r\n else:\r\n self.attack()\r\n\r\n def on_key_press(self, key, modifiers):\r\n if key == arcade.key.W or key == arcade.key.UP:\r\n self.up_pressed = True\r\n self.player_sprite.move(\"U\")\r\n elif key == arcade.key.S or key == arcade.key.DOWN:\r\n self.down_pressed = True\r\n self.player_sprite.move(\"D\")\r\n elif key == arcade.key.A or key == arcade.key.LEFT:\r\n self.left_pressed = True\r\n self.player_sprite.move(\"L\")\r\n elif key == arcade.key.D or key == arcade.key.RIGHT:\r\n self.right_pressed = True\r\n self.player_sprite.move(\"R\")\r\n\r\n elif key == arcade.key.C:\r\n self.c_pressed = True\r\n\r\n if key == arcade.key.K:\r\n self.k_pressed = True\r\n elif key == arcade.key.L:\r\n self.l_pressed = True\r\n\r\n elif key == arcade.key.B:\r\n self.b_pressed = not self.b_pressed\r\n if self.b_pressed:\r\n self.createBuildTool()\r\n else:\r\n self.buildingSquare_sprite.remove_from_sprite_lists()\r\n elif key == arcade.key.U:\r\n self.c_pressed = False\r\n self.b_pressed = False\r\n self.up_pressed = False\r\n self.down_pressed = False\r\n self.left_pressed = False\r\n self.right_pressed = False\r\n\r\n upgrade_view = UpgradeView(self, self.player_sprite.center_x, self.player_sprite.center_y)\r\n # pause_view.setup()\r\n self.window.show_view(upgrade_view)\r\n\r\n if key == arcade.key.P:\r\n self.c_pressed = False\r\n self.b_pressed = False\r\n self.up_pressed = False\r\n self.down_pressed = False\r\n self.left_pressed = False\r\n self.right_pressed = False\r\n\r\n pause_view = PauseView(self, SCREEN_WIDTH, SCREEN_HEIGHT, self.player_sprite.center_x,\r\n self.player_sprite.center_y)\r\n # pause_view.setup()\r\n self.window.show_view(pause_view)\r\n\r\n if key == arcade.key.T:\r\n self.buildTemple()\r\n\r\n if self.b_pressed:\r\n if key == arcade.key.KEY_2:\r\n self.buildThorns = True\r\n if key == arcade.key.KEY_1:\r\n self.buildThorns = False\r\n\r\n if self.k_pressed:\r\n if self.user_volume > 0.2:\r\n self.user_volume -= 0.1\r\n self.k_pressed = False\r\n elif self.l_pressed:\r\n if self.user_volume < 1:\r\n self.user_volume += 0.1\r\n self.l_pressed = False\r\n\r\n # self.buildWall()\r\n\r\n def on_key_release(self, key, modifiers):\r\n\r\n if key == arcade.key.W or key == arcade.key.UP:\r\n self.up_pressed = False\r\n elif key == arcade.key.S or key == arcade.key.DOWN:\r\n self.down_pressed = False\r\n elif key == arcade.key.A or key == arcade.key.LEFT:\r\n self.left_pressed = False\r\n elif key == arcade.key.D or key == arcade.key.RIGHT:\r\n self.right_pressed = False\r\n if key == arcade.key.LCTRL and self.player_sprite.stamina > 0:\r\n # self.player_sprite.change_x + 2\r\n # self.player_sprite.change_y + 2\r\n self.player_sprite.stamina -= 1\r\n\r\n def buildTemple(self):\r\n\r\n temple_sprite = Temple()\r\n\r\n if self.templeLevel == 0:\r\n if self.player_sprite.coins >= temple_sprite.levelCost[0]:\r\n # path = 
\"images/grass/\"\r\n # listOfFlower = os.listdir(path)\r\n temple_sprite.position = [self.player_sprite.center_x, self.player_sprite.center_y]\r\n temple_sprite.texture = temple_sprite.textures[0]\r\n # self.flower_sprite_list.append(flower_sprite)\r\n self.scene.add_sprite(\"Temple\", temple_sprite)\r\n self.player_sprite.coins -= temple_sprite.levelCost[0]\r\n self.templeLevel += 1\r\n else:\r\n temple_sprite = self.scene[\"Temple\"][0]\r\n\r\n if self.player_sprite.coins >= temple_sprite.levelCost[self.templeLevel] and \\\r\n arcade.check_for_collision(self.player_sprite, temple_sprite):\r\n temple_sprite.texture = temple_sprite.textures[self.templeLevel]\r\n self.player_sprite.coins -= temple_sprite.levelCost[0]\r\n # temple_sprite.update()\r\n self.templeLevel += 1\r\n if self.templeLevel == 3: self.win()\r\n\r\n # companion_sprite = Companion()\r\n # self.player_sprite.coins -= 100\r\n # companion_sprite.position = [self.player_sprite.center_x, self.player_sprite.center_y]\r\n # self.scene.add_sprite(\"Companions\", companion_sprite)\r\n\r\n def win(self):\r\n view = VictoryView(SCREEN_WIDTH, SCREEN_HEIGHT)\r\n self.window.show_view(view)\r\n\r\n\r\n def attack(self):\r\n self.sounds[\"swordAttack\"].play(self.user_volume)\r\n self.player_sprite.is_attacking = True\r\n\r\n addVector = [0, 0]\r\n if self.player_sprite.viewP[0] < 0:\r\n addVector[0] = -2\r\n else:\r\n addVector[0] = 2\r\n if self.player_sprite.viewP[1] < 0:\r\n addVector[1] = -2\r\n else:\r\n addVector[1] = 2\r\n\r\n # for enemy in self.scene[\"Enemies\"]:\r\n # hit = False\r\n '''\r\n if self.player_sprite.viewP[0] <= 0:\r\n if enemy.center_x - (self.player_sprite.center_x + addVector[0]) < 0 and \\\r\n abs(enemy.center_y - self.player_sprite.center_y) < 10:\r\n hit = True\r\n elif self.player_sprite.viewP[0] > 0:\r\n if enemy.center_x - (self.player_sprite.center_x + addVector[0]) >= 0 and \\\r\n abs(enemy.center_y - self.player_sprite.center_y) < 10:\r\n hit = True\r\n elif self.player_sprite.viewP[1] <= 0:\r\n if enemy.center_y - (self.player_sprite.center_y + addVector[1]) >= 0 and \\\r\n abs(enemy.center_x - self.player_sprite.center_x) < 10:\r\n hit = True\r\n elif self.player_sprite.viewP[1] > 0:\r\n if enemy.center_y - (self.player_sprite.center_y + addVector[1]) < 0 and \\\r\n abs(enemy.center_x - self.player_sprite.center_x) < 10:\r\n hit = True\r\n '''\r\n enemies = arcade.check_for_collision_with_list(self.player_sprite.sword, self.scene[\"Enemies\"])\r\n if len(enemies) > 0:\r\n for enemy in enemies:\r\n\r\n # if hit:\r\n s = self.sounds[\"hit\"]\r\n s.play(self.user_volume)\r\n enemy.life -= 1\r\n if enemy.life <= 0:\r\n self.player_sprite.coins += 10 * enemy.scale\r\n self.scene[\"Enemies\"].remove(enemy)\r\n enemy.kill()\r\n if len(self.scene[\"Enemies\"]) <= 0:\r\n snd = arcade.load_sound(\":resources:sounds/upgrade1.wav\")\r\n self.sounds[\"clearGround\"].play(self.user_volume)\r\n\r\n def on_update(self, delta_time):\r\n \"\"\" Movement and game logic \"\"\"\r\n self.player_sprite.change_x = 0\r\n self.player_sprite.change_y = 0\r\n if self.up_pressed and not self.down_pressed:\r\n self.player_sprite.change_y += PLAYER_SPEED\r\n elif self.down_pressed and not self.up_pressed:\r\n self.player_sprite.change_y -= PLAYER_SPEED\r\n if self.left_pressed and not self.right_pressed:\r\n self.player_sprite.change_x -= PLAYER_SPEED\r\n elif self.right_pressed and not self.left_pressed:\r\n self.player_sprite.change_x += PLAYER_SPEED\r\n self.player_sprite.center_x += self.player_sprite.change_x\r\n 
self.player_sprite.center_y += self.player_sprite.change_y\r\n self.physics_engine.update()\r\n self.tree_engine.update()\r\n\r\n arcade.set_viewport(self.player_sprite.center_x - SCREEN_WIDTH / 2,\r\n self.player_sprite.center_x + SCREEN_WIDTH / 2,\r\n self.player_sprite.center_y - SCREEN_HEIGHT / 2,\r\n self.player_sprite.center_y + SCREEN_HEIGHT / 2)\r\n # enemy_speedy = 0.5 # + (self.player_sprite.difficulty / 12)\r\n for enemy in self.scene[\"Enemies\"]:\r\n y_pos = enemy.center_y\r\n x_pos = enemy.center_x\r\n if self.player_sprite.center_y > y_pos:\r\n enemy.dir_y = 1\r\n if self.player_sprite.center_x > x_pos:\r\n enemy.dir_x = 1\r\n if self.player_sprite.center_y <= y_pos:\r\n enemy.dir_y = -1\r\n if self.player_sprite.center_x <= x_pos:\r\n enemy.dir_x = -1\r\n\r\n if len(arcade.check_for_collision_with_list(enemy, self.spiky)) > 0:\r\n enemy.change_x *= -1\r\n enemy.change_y *= -1\r\n hitList = arcade.check_for_collision_with_list(enemy, self.spiky)\r\n for c in hitList:\r\n if c.lives <= 0:\r\n if c.damage == 0:\r\n choice = \"wall\"\r\n\r\n else:\r\n choice = \"thorns\"\r\n self.sounds[choice].play(self.user_volume)\r\n\r\n hitList = arcade.check_for_collision_with_list(c, self.scene[\"Walls\"])\r\n for a in hitList:\r\n try:\r\n a.kill()\r\n except:\r\n pass\r\n\r\n c.kill()\r\n c.lives -= 1\r\n\r\n enemy.life -= c.damage\r\n if enemy.life <= 0:\r\n enemy.kill()\r\n self.player_sprite.coins = enemy.value\r\n c.lives -= 1\r\n\r\n if len(arcade.check_for_collision_with_list(enemy, self.scene[\"Walls\"])) > 0:\r\n enemy.change_x *= -1\r\n enemy.change_y *= -1\r\n hitList = arcade.check_for_collision_with_list(enemy, self.scene[\"Walls\"])\r\n for c in hitList:\r\n if c.lives <= 0:\r\n choice = \"\"\r\n if c.damage == 0:\r\n choice = \"wall\"\r\n\r\n else:\r\n choice = \"thorns\"\r\n\r\n self.sounds[choice].play(self.user_volume)\r\n c.kill()\r\n c.lives -= 1\r\n\r\n enemy.life -= c.damage\r\n if enemy.life <= 0:\r\n enemy.kill()\r\n self.player_sprite.coins = enemy.value\r\n c.lives -= 1\r\n\r\n if len(arcade.check_for_collision_with_list(enemy, self.tree_sprite_list)) > 0:\r\n enemy.change_x *= -1\r\n enemy.change_y *= -1\r\n hitList = arcade.check_for_collision_with_list(enemy, self.tree_sprite_list)\r\n for c in hitList:\r\n\r\n if c.lives <= 0:\r\n # self.sounds[choice].play()\r\n c.kill()\r\n c.lives -= 1\r\n\r\n hitListEnemy = arcade.check_for_collision_with_list(self.player_sprite, self.scene[\"Enemies\"])\r\n if len(hitListEnemy) > 0:\r\n for enemy in hitListEnemy:\r\n if self.player_sprite.health <= 0:\r\n self.player_sprite.kill()\r\n view = GameOverView(SCREEN_WIDTH, SCREEN_HEIGHT)\r\n self.window.show_view(view)\r\n self.player_sprite.health -= enemy.scale\r\n if enemy.center_x < self.player_sprite.center_x:\r\n enemy.center_x -= 30\r\n elif enemy.center_x >= self.player_sprite.center_x:\r\n enemy.center_x += 30\r\n if enemy.center_y < self.player_sprite.center_y:\r\n enemy.center_y -= 30\r\n elif enemy.center_y >= self.player_sprite.center_y:\r\n enemy.center_y += 30\r\n\r\n self.on_screen_pointer_x = self.player_sprite.center_x + self.mouse_position_x - SCREEN_WIDTH // 2\r\n self.on_screen_pointer_y = self.player_sprite.center_y + self.mouse_position_y - SCREEN_HEIGHT // 2\r\n\r\n if self.b_pressed:\r\n self.buildingSquare_sprite.center_x = self.on_screen_pointer_x\r\n self.buildingSquare_sprite.center_y = self.on_screen_pointer_y\r\n\r\n if self.c_pressed:\r\n self.c_pressed = False\r\n if self.player_sprite.coins >= 100:\r\n companion_sprite = Companion()\r\n 
                self.player_sprite.coins -= 100\r\n                companion_sprite.position = [self.player_sprite.center_x, self.player_sprite.center_y]\r\n                self.scene.add_sprite(\"Companions\", companion_sprite)\r\n\r\n        for creeper in self.scene[\"Companions\"]:\r\n            closest = arcade.get_closest_sprite(creeper, self.scene[\"Enemies\"])\r\n            if closest is not None:\r\n                if creeper.center_x > closest[0].center_x:\r\n                    creeper.change_x = -0.5  # later: make this a SPEED constant\r\n                elif creeper.center_x < closest[0].center_x:\r\n                    creeper.change_x = 0.5\r\n                if creeper.center_y > closest[0].center_y:\r\n                    creeper.change_y = -0.5  # later: make this a SPEED constant\r\n                elif creeper.center_y < closest[0].center_y:\r\n                    creeper.change_y = 0.5\r\n\r\n            creeper.update()\r\n            creeper.update_animation()\r\n\r\n            companion_collider = arcade.check_for_collision_with_list(creeper, self.scene[\"Enemies\"])\r\n            if len(companion_collider) > 0:\r\n                creeper.kaboom = True\r\n                creeper.walk = False\r\n                for c in companion_collider:\r\n                    if creeper.alive and not creeper.exploded:\r\n                        c.life -= 5\r\n                        if c.life <= 0:\r\n                            c.kill()\r\n                            self.player_sprite.coins += c.value\r\n                creeper.exploded = True\r\n\r\n        for e in self.scene[\"Enemies\"]:\r\n            e.update_animation()\r\n            e.phys.update()\r\n            # e.enemies_engine_walls.update()\r\n            # e.enemies_engine_trees.update()\r\n\r\n        self.player_sprite.update()\r\n        self.player_sprite.update_animation()\r\n        self.radio.update_animation()\r\n\r\n        # tree collision\r\n        tree_collider = arcade.check_for_collision_with_list(self.player_sprite, self.tree_sprite_list)\r\n\r\n        # print(self.scene[\"Trees\"])\r\n        # print(\"ooof\", len(tree_collider))\r\n\r\n        for tree in tree_collider:\r\n            if self.player_sprite.bottom < tree.bottom:\r\n                self.player_sprite.draw()\r\n            else:\r\n                self.treeToDraw = tree\r\n                # tree.draw()\r\n\r\n        if (len(self.scene[\"Enemies\"])) == 0:\r\n            self.createEnemies()\r\n\r\n    def on_draw(self):\r\n        \"\"\" Draw everything \"\"\"\r\n        self.clear()\r\n\r\n        self.scene.draw()\r\n        self.tree_sprite_list.draw()\r\n        self.player_sprite.hood.draw()\r\n        self.player_sprite.sword.draw()\r\n\r\n        try:\r\n            self.treeToDraw.draw()\r\n        except Exception:\r\n            pass\r\n\r\n        self.gui_camera.use()\r\n\r\n        # Draw our score on the screen, scrolling it with the viewport\r\n        nums = len(self.scene[\"Enemies\"])\r\n        score_text = f\"Coins: {self.player_sprite.coins}\\n\" + \\\r\n                     f\"Enemies: {nums}\"\r\n\r\n        arcade.draw_rectangle_filled(0, 600, 600, 80, arcade.csscolor.BEIGE)\r\n        arcade.draw_text(\r\n            score_text,\r\n            10,\r\n            575,\r\n            arcade.csscolor.BLACK,\r\n            18,\r\n        )\r\n        arcade.draw_text(\r\n            \"vol \" + str(int(self.user_volume * 100)) + \"%\",\r\n            690,\r\n            575,\r\n            arcade.csscolor.BLACK,\r\n            18,\r\n        )\r\n\r\nclass TitleLose(arcade.Sprite):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.center_x = SCREEN_WIDTH / 2\r\n        self.center_y = SCREEN_HEIGHT / 2\r\n        self.sheet = arcade.load_spritesheet(\"images/title_lose.png\", 800, 600, 10, 10, 0)\r\n        self.cur_texture = 0\r\n\r\n    def update_animation(self, delta_time: float = 1 / 60):\r\n        self.cur_texture += 1\r\n        if self.cur_texture > 10 * 4 - 1:\r\n            self.cur_texture = 0\r\n        frame = self.cur_texture // 4\r\n        self.texture = self.sheet[frame]\r\n\r\nclass GameOverView(arcade.View):\r\n    \"\"\" View to show when game is over \"\"\"\r\n\r\n    def __init__(self, SCREEN_WIDTH, SCREEN_HEIGHT):\r\n        \"\"\" This is run once when we switch to this view \"\"\"\r\n        super().__init__()\r\n        self.texture = arcade.load_texture(\"images/gameOver.jpg\")\r\n        self.width = SCREEN_WIDTH\r\n        self.height = SCREEN_HEIGHT\r\n        # Reset the viewport, 
necessary if we have a scrolling game and we need\r\n # to reset the viewport back to the start so we can see what we draw.\r\n self.title = TitleLose()\r\n arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)\r\n\r\n def on_draw(self):\r\n \"\"\" Draw this view \"\"\"\r\n self.clear()\r\n self.title.draw()\r\n\r\n def on_update(self, delta_time):\r\n self.title.update_animation()\r\n\r\n def on_mouse_press(self, _x, _y, _button, _modifiers):\r\n \"\"\" If the user presses the mouse button, re-start the game. \"\"\"\r\n game_view = GameView()\r\n game_view.setup()\r\n self.window.show_view(game_view)\r\n\r\n\r\nclass PauseView(arcade.View):\r\n def __init__(self, game_view, WIDTH, HEIGHT, px, py):\r\n super().__init__()\r\n self.game_view = game_view\r\n self.width = WIDTH\r\n self.height = HEIGHT\r\n self.px = px\r\n self.py = py\r\n\r\n def on_show(self):\r\n arcade.set_background_color(arcade.color.ORANGE)\r\n\r\n def on_draw(self):\r\n self.clear()\r\n\r\n # Draw player, for effect, on pause screen.\r\n # The previous View (GameView) was passed in\r\n # and saved in self.game_view.\r\n # player_sprite = self.game_view.player_sprite\r\n # player_sprite.draw()\r\n\r\n # draw an orange filter over him\r\n # arcade.draw_lrtb_rectangle_filled(left=player_sprite.left,\r\n # right=player_sprite.right,\r\n # top=player_sprite.top,\r\n # bottom=player_sprite.bottom,\r\n # color=arcade.color.ORANGE + (200,))\r\n\r\n arcade.set_background_color(arcade.color.ORANGE)\r\n arcade.draw_text(\"PAUSED\", self.px, self.py + 50,\r\n arcade.color.BLACK, font_size=50, anchor_x=\"center\")\r\n\r\n # Show tip to return or reset\r\n arcade.draw_text(\"Press Enter to return\",\r\n self.px,\r\n self.py,\r\n arcade.color.BLACK,\r\n font_size=20,\r\n anchor_x=\"center\")\r\n arcade.draw_text(\"Press Esc to reset\",\r\n self.px,\r\n self.py - 30,\r\n arcade.color.BLACK,\r\n font_size=20,\r\n anchor_x=\"center\")\r\n\r\n def on_key_press(self, key, _modifiers):\r\n if key == arcade.key.ENTER or key == arcade.key.P: #\r\n self.window.show_view(self.game_view)\r\n elif key == arcade.key.ESCAPE: # reset game\r\n\r\n game_view = GameView()\r\n game_view.setup()\r\n self.window.show_view(game_view)\r\n\r\nclass VictoryView(arcade.View):\r\n def __init__(self, SCREEN_WIDTH, SCREEN_HEIGHT):\r\n \"\"\" This is run once when we switch to this view \"\"\"\r\n super().__init__()\r\n self.texture = arcade.load_texture(\"images/win.png\")\r\n self.width = SCREEN_WIDTH\r\n self.height = SCREEN_HEIGHT\r\n # Reset the viewport, necessary if we have a scrolling game and we need\r\n # to reset the viewport back to the start so we can see what we draw.\r\n arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)\r\n\r\n def on_draw(self):\r\n \"\"\" Draw this view \"\"\"\r\n self.clear()\r\n self.texture.draw_sized(self.width / 2, self.height / 2,\r\n self.width, self.height)\r\n\r\n def on_mouse_press(self, _x, _y, _button, _modifiers):\r\n \"\"\" If the user presses the mouse button, re-start the game. 
\"\"\"\r\n game_view = GameView()\r\n game_view.setup()\r\n self.window.show_view(game_view)\r\n\r\n def on_key_press(self, key, _modifiers):\r\n if key == arcade.key.ENTER: #\r\n game_view = GameView()\r\n game_view.setup()\r\n self.window.show_view(game_view)\r\n\r\nclass UpgradeView(arcade.View):\r\n\r\n def __init__(self, game_view, pX, pY):\r\n super().__init__()\r\n self.clear()\r\n self.window.set_mouse_visible(True)\r\n self.game_view = game_view\r\n self.items = {}\r\n self.fillItems()\r\n\r\n self.lastRow = 0\r\n self.lastColumn = 0\r\n self.thisRow = 0\r\n self.thisColumn = 0\r\n\r\n self.pX = pX - 250\r\n self.pY = pY - 250\r\n self.ROW_COUNT = 5\r\n self.COLUMN_COUNT = 5\r\n\r\n # This sets the WIDTH and HEIGHT of each grid location\r\n self.WIDTH = 80\r\n self.HEIGHT = 80\r\n\r\n # This sets the margin between each cell\r\n # and on the edges of the screen.\r\n self.MARGIN = 15\r\n\r\n # Do the math to figure out our screen dimensions\r\n self.SCREEN_WIDTH = (self.WIDTH + self.MARGIN) * self.COLUMN_COUNT + self.MARGIN\r\n self.SCREEN_HEIGHT = (self.HEIGHT + self.MARGIN) * self.ROW_COUNT + self.MARGIN\r\n self.SCREEN_TITLE = \"Array Backed Grid Buffered Example\"\r\n\r\n self.background_color = arcade.color.BLACK\r\n\r\n # One dimensional list of all sprites in the two-dimensional sprite list\r\n self.grid_sprite_list = arcade.SpriteList()\r\n\r\n # This will be a two-dimensional grid of sprites to mirror the two\r\n # dimensional grid of numbers. This points to the SAME sprites that are\r\n # in grid_sprite_list, just in a 2d manner.\r\n self.grid_sprites = []\r\n\r\n for row in range(self.ROW_COUNT):\r\n self.grid_sprites.append([])\r\n for column in range(self.COLUMN_COUNT):\r\n x = column * (self.WIDTH + self.MARGIN) + (self.WIDTH / 2 + self.MARGIN) + self.pX\r\n y = row * (self.HEIGHT + self.MARGIN) + (self.HEIGHT / 2 + self.MARGIN) + self.pY\r\n if row == 1 or column == 0:\r\n sprite = arcade.SpriteSolidColor(self.WIDTH, self.HEIGHT, arcade.csscolor.GHOST_WHITE)\r\n else:\r\n sprite = arcade.SpriteSolidColor(self.WIDTH, self.HEIGHT, arcade.color.WHITE)\r\n sprite.center_x = x\r\n sprite.center_y = y\r\n self.grid_sprite_list.append(sprite)\r\n\r\n self.grid_sprites[row].append(sprite)\r\n\r\n def fillItems(self):\r\n\r\n \"\"\"\r\n key = item\r\n [0] = texture\r\n [1] = [x,y]\r\n [2] = text under\r\n [3] = coins\r\n [4] = bool (taken/free)\r\n [5] = {\"stat\" -> \"bonus\"}\r\n \"\"\"\r\n\r\n ''' \r\n 3 - stojici kape\r\n 6 - stojici mec\r\n 0 - comp\r\n '''\r\n self.items[\"sword\"] = []\r\n self.items[\"sword\"].append(self.game_view.player_sprite.all_textures[6][0][0])\r\n self.items[\"sword\"].append([4, 1])\r\n\r\n self.items[\"comp\"] = []\r\n self.items[\"comp\"].append(self.game_view.player_sprite.all_textures[0][0][0])\r\n self.items[\"comp\"].append([4, 2])\r\n\r\n self.items[\"wall\"] = []\r\n self.items[\"wall\"]\r\n\r\n def setup(self):\r\n self.clear()\r\n\r\n def on_show(self):\r\n self.clear()\r\n arcade.set_background_color(arcade.csscolor.GHOST_WHITE)\r\n\r\n def on_draw(self):\r\n \"\"\"\r\n Render the screen.\r\n \"\"\"\r\n # We should always start by clearing the window pixels\r\n self.clear()\r\n\r\n # Batch draw the grid sprites\r\n self.grid_sprite_list.draw()\r\n\r\n for i in [\"sword\", \"comp\"]:\r\n x = self.items[i][1][0]\r\n y = self.items[i][1][1]\r\n self.grid_sprites[x][y].texture = self.items[i][0]\r\n\r\n if self.thisColumn != self.lastColumn and self.thisRow != self.lastRow:\r\n arcade.draw_text(\"UPGRADE SHOP\",\r\n 50 + self.pX, 50 + 
self.pY,\r\n arcade.color.BLACK, 12,\r\n anchor_x=\"center\")\r\n\r\n def on_key_press(self, symbol: int, modifiers: int):\r\n\r\n if symbol == arcade.key.ESCAPE:\r\n self.window.show_view(self.game_view)\r\n\r\n def on_mouse_press(self, x, y, button, modifiers):\r\n \"\"\"\r\n Called when the user presses a mouse button.\r\n \"\"\"\r\n\r\n # Convert the clicked mouse position into grid coordinates\r\n column = int((x - 150) // (self.WIDTH + self.MARGIN))\r\n row = int((y - 60) // (self.HEIGHT + self.MARGIN))\r\n\r\n print(f\"Click coordinates: ({x}, {y}). Grid coordinates: ({row}, {column})\")\r\n\r\n # Make sure we are on-grid. It is possible to click in the upper right\r\n # corner in the margin and go to a grid location that doesn't exist\r\n if row >= self.ROW_COUNT or column >= self.COLUMN_COUNT:\r\n # Simply return from this method since nothing needs updating\r\n return\r\n if row < 0 or column < 0:\r\n return\r\n # Flip the color of the sprite\r\n if row == 1 or column == 0: return\r\n if self.grid_sprites[row][column].color == arcade.color.WHITE:\r\n self.grid_sprites[self.lastRow][self.lastColumn].color = arcade.color.WHITE\r\n self.grid_sprites[row][column].color = arcade.color.GREEN\r\n self.lastRow = row\r\n self.lastColumn = column\r\n else:\r\n self.grid_sprites[row][column].color = arcade.color.WHITE\r\n\r\n\r\ndef main():\r\n \"\"\" Main function \"\"\"\r\n window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\r\n start_view = TitleView()\r\n window.show_view(start_view)\r\n start_view.setup()\r\n arcade.run()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Tricerator/GameDevHub","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":42098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"36045365355","text":"import nltk\nimport re\nimport string\n\n\n#nltk.download('punkt')\n#nltk.download('stopwords')\nstopwords = list(nltk.corpus.stopwords.words(\"russian\")) + [\n 'вс', 'все', 'всё', 'супер', 'прекрасно', 'отлично', 'очень', 'удобно', 'хорошо',\n 'классно', 'молодцы', 'отлично', 'ок', 'нормально', 'спасибо', 'класс', 'норм',\n 'приложение', 'быстро', 'просто', 'удобное', 'отзыв', 'сбербанк', 'онлайн',\n 'лучшее', 'полезное', 'good', 'ok', 'отличное', 'банк'\n]\npunkt = string.punctuation + '«»'\n\n\ndef tokenizer(text, remove_stopwords=False):\n def _is_valid(x):\n if len(x) == 0:\n return False\n if remove_stopwords and x in stopwords:\n return False\n return True\n\n #words = nltk.word_tokenize(text.lower(), language=\"russian\")\n text = text.lower()\n for ch in punkt:\n text = text.replace(ch, ' ')\n words = text.split()\n res = list(filter(_is_valid, words))\n return res\n","repo_name":"NUTS-COON/sbercode-sber_online","sub_path":"Code/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35316969749","text":"\"\"\" trades functions for database interaction \"\"\"\n\nimport sys\nsys.path.append('../TradingBot') # path to parent directory\nfrom database import *\nfrom stock_data import get_live_stock_price\n\n\ndef get_all_trades():\n query = \"SELECT * FROM trades;\"\n data = run_all_query(query)\n return data\n\n\ndef get_users_trades(id):\n query = \"SELECT * FROM trades WHERE userID = %s\"\n value = id\n data = run_value_query(query, value)\n return data\n\n\ndef add_position(values):\n query = \"INSERT INTO trades 
(userID, ticker, position, quantity, entry_stock_price, entry_total_price, status) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n data = run_alter_query(query, values)\n return data\n\n\ndef get_trade(ID):\n query = \"SELECT * FROM trades WHERE ID = %s\"\n value = ID\n data = run_value_query(query, value)\n return data\n\n\ndef check_position(id, ticker, position_type):\n query = \"SELECT EXISTS(SELECT * FROM trades WHERE userID = %s and ticker = %s and position = %s)\"\n values = (id, ticker, position_type)\n data = run_multiple_value_query(query, values)\n data = int(\"{}\".format(*data[0]))\n if(data == 1):\n # check if position is open\n new_query = \"SELECT status, ID FROM trades where userID = %s and ticker = %s and position = %s\"\n new_values = (id, ticker, position_type)\n result = run_multiple_value_query(new_query, new_values)\n status = result[0][0]\n ID = result[0][1]\n if(status == 1):\n close_position(ID, ticker)\n return True\n return False\n\n\ndef close_position(trade_id, ticker):\n quantity = int(\"{}\".format((get_trade(trade_id)[0][5])))\n exit_price = round(get_live_stock_price(ticker), 2)\n exit_total_price = round(quantity * exit_price, 2)\n query = \"UPDATE trades SET status = %s, close_date = %s, exit_stock_price = %s, exit_total_stock_price = %s WHERE ID = %s\"\n values = (2, \"CURRENT_TIMESTAMP\", exit_price, exit_total_price, trade_id)\n data = run_alter_query(query, values)\n return data\n\n\n#print(get_all_trades())\n#print(get_users_trades(1))\n#data = check_position(1, 'MSFT', 1)\n#print(data)\n\n\"\"\"values = (1, 'MSFT', 1, 10, 200, 2000, 1)\nadd_position(values)\"\"\"","repo_name":"surajmannn/TradingBot","sub_path":"database_models/trades.py","file_name":"trades.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74399555208","text":"import open3d as o3d\nimport numpy as np\nimport cv2 as cv\nfrom path import Path\nfrom imageio import imread\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom scipy.spatial.transform import Rotation\nfrom imageio import imwrite\nimport os, sys, threading, time, argparse, glob\n\nfrom utils_vis import *\n\n\nclass GCVDReader():\n def __init__(self, folder):\n self.depth_dir = Path(folder)/'depths/final'\n self.camera_dir = Path(folder)/'camera/final'\n self.image_dir = Path(folder)/'images_down'\n\n self.image_filenames = sorted(list(glob.glob(self.image_dir/'*')))\n self.depth_filenames = sorted(list(glob.glob(self.depth_dir/'*')))\n self.camera_filenames = sorted(list(glob.glob(self.camera_dir/'*')))\n \n self.length = len(self.image_filenames)\n assert len(self.depth_filenames) == self.length\n assert len(self.camera_filenames) == self.length\n\n def read_camera(self, index):\n camera = np.load(self.camera_filenames[index], allow_pickle=True)[()]\n K = camera['K']\n pose = camera['pose']\n return K, pose\n\n def read(self, index):\n image = imread(self.image_filenames[index]).astype(float) / 255.\n depth = np.load(self.depth_filenames[index]).squeeze(0)\n K, pose = self.read_camera(index)\n \n return image, depth, K, pose\n\nclass SevenScenesReader():\n def __init__(self, folder):\n self.camera_filenames = sorted(list(glob.glob(Path(folder)/'*.pose.txt')))\n self.length = len(self.camera_filenames)\n\n def read_camera(self, index):\n pose = np.genfromtxt(self.camera_filenames[index])\n return None, pose\n\ndef read_trajectory(reader):\n poses = []\n for i in range(reader.length):\n 
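        # read_camera() returns (K, pose); only the 4x4 pose is stacked here\r\n        # (K may be None for the ground-truth reader)\r\n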
poses.append(reader.read_camera(i)[1])\n poses = np.stack(poses, axis=0)\n return poses\n\nindex = 0\nparser = argparse.ArgumentParser(description='3D Visualization')\nparser.add_argument('input_path', type=str)\nparser.add_argument('groundtruth_path', type=str)\nopt = parser.parse_args()\n\nreader = GCVDReader(opt.input_path)\ngt_reader = SevenScenesReader(opt.groundtruth_path)\ngcvd_traj = read_trajectory(reader)\ngt_traj = read_trajectory(gt_reader)\n\ngcvd_traj_aligned, align_transform, ate = align_trajectory(gt_traj, gcvd_traj)\nprint('ATE = {:.04} m'.format(ate))\n\napp = o3d.visualization.gui.Application.instance\napp.initialize()\n\nheight, width = 1080, 1920\nwindow = o3d.visualization.gui.Application.instance.create_window('img', width=width, height=height)\nwidget = o3d.visualization.gui.SceneWidget()\nwidget.scene = o3d.visualization.rendering.Open3DScene(window.renderer)\nvis = widget.scene\n\nwindow.add_child(widget)\nmat = o3d.visualization.rendering.Material()\nmat.shader = 'unlitLine'\nmat.line_width = 3\n\nmat_g = o3d.visualization.rendering.Material()\nmat_g.shader = 'unlitLine'\nmat_g.line_width = 1.5\n\nmat_pcd = o3d.visualization.rendering.Material()\n\nview_cam = widget.scene.camera\n\n# view offset\noffset = np.array([[ 9.99663439e-01, 2.59491099e-02, 1.59831567e-04, -5.18736794e-02],\n [ 2.12940823e-02, -8.16779167e-01, -5.76557411e-01, -1.95576986e+00],\n [-1.48307187e-02, 5.76366692e-01, -8.17056646e-01, -1.99995985e+00],\n [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])\n\nprev_pose = None\n\ndef get_look_at_param(V=None):\n if V is None:\n global view_cam\n V = view_cam.get_view_matrix()\n V = np.linalg.inv(V)\n eye = V[:3, 3]\n Z = V[:3, 2]\n Y = V[:3, 1]\n X = V[:3, 0]\n center = eye - Z\n up = np.cross(Z, X) / np.dot(Z, Z) + Z\n return center, eye, up \n\ndef add_new_cam():\n global index, reader, vis, view_cam, widget, prev_pose, offset, align_transform, gcvd_traj_aligned, gt_traj\n\n if index >= reader.length: return \n image, depth, K, pose = reader.read(index)\n \n align_R, align_t, align_s = align_transform\n rot_aligned = align_R @ pose[:3, :3]\n\n try:\n view_param = get_look_at_param(gt_traj[index] @ offset)\n view_cam.look_at(list(view_param[0]), list(view_param[1]), list(view_param[2]))\n\n except Exception as e:\n pass\n\n R = rot_aligned\n t = gcvd_traj_aligned[index]\n \n cam_set, _ = gen_cam_lineset(image.shape[:2], K, color=[1, 0, 0], R=R, T=t, size_scale=0.2)\n vis.remove_geometry('cam_{}'.format(index-1))\n vis.add_geometry('cam_{}'.format(index), cam_set, mat)\n\n pcd = compute_point_cloud(image, depth * align_s, K, R, t, color=None)\n vis.remove_geometry('cloud')\n vis.add_geometry('cloud'.format(index), pcd, mat_pcd)\n\n \n if index > 0:\n traj_lineset = o3d.geometry.LineSet()\n traj_lineset.colors = o3d.utility.Vector3dVector([[0, 1, 0]])\n traj_lineset.points = o3d.utility.Vector3dVector(gcvd_traj_aligned[index - 1: index + 1])\n traj_lineset.lines = o3d.utility.Vector2iVector([[0, 1]])\n vis.add_geometry('traj_{}'.format(index), traj_lineset, mat)\n \n index += 1\n\ngt_lineset = o3d.geometry.LineSet()\ngt_lineset.points = o3d.utility.Vector3dVector(gt_traj[:, :3, 3])\ngt_lineset.lines = o3d.utility.Vector2iVector([[i, i+1] for i in range(gt_reader.length - 1)])\ngt_lineset.colors = o3d.utility.Vector3dVector([[0, 0, 1] for i in range(gt_reader.length - 1)])\nvis.add_geometry('gt_traj', gt_lineset, mat_g)\n\ndef thread_main():\n while True:\n o3d.visualization.gui.Application.instance.post_to_main_thread(window, 
add_new_cam)\n time.sleep(0.03)\n\nvis_thread = threading.Thread(target=thread_main)\nvis_thread.start()\no3d.visualization.gui.Application.instance.run()\n\n","repo_name":"yaochih/GCVD-release","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"16"} +{"seq_id":"72746178249","text":"\"\"\"eng_soft_prototipo URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom home import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n path('', views.welcome, name=\"welcome\"),\n\n # manipulando usuario\n path('register/', views.register, name=\"register\"),\n path('register/advanced_register', views.advanced_register, name=\"register_advanced\"),\n path('login/', views.login_page, name=\"login\"),\n path('logout/', views.logout_user, name=\"logout\"),\n\n # paginas disponiveis ao usuario\n path('home/', views.home, name=\"home\"),\n path('minhas_compras/', views.minhas_compras, name=\"minhas_compras\"),\n path('minhas_compras/deletar_td/', views.deletar_TesouroDireto, name=\"deletar_TesouroDireto\"),\n path('minhas_compras/deletar_fi/', views.deletar_FundodeInvestimento, name=\"deletar_FundodeInvestimento\"),\n path('minhas_compras/deletar_rf/', views.deletar_RendaFixa, name=\"deletar_RendaFixa\"),\n path('meu_cadastro/', views.meu_cadastro, name=\"meu_cadastro\"),\n path('home/tesouro_direto/', views.tesouro_direto, name=\"tesouro_direto\"),\n path('home/fundo_de_investimento/', views.fundo_de_investimento, name=\"fundo_de_investimento\"),\n path('home/renda_fixa/', views.renda_fixa, name=\"renda_fixa\"),\n path('meu_cadastro/alterar_dados/', views.alterar_dados, name=\"alterar_dados\"),\n\n]\n","repo_name":"gui1080/Prototipo_Eng_Software_2020-1","sub_path":"eng_soft_prototipo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16868110470","text":"from django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import include\nfrom django.urls import path\n\nSTATIC_DIR = settings.PROJECT_DIR / \"static\"\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"\", include(\"apps.index.urls\")),\n path(\"resume/\", include(\"apps.resume.urls\")),\n path(\"education/\", include(\"apps.education.urls\")),\n path(\"blog/\", include(\"apps.blog.urls\")),\n path(\"o/\", include(\"apps.authorization.urls\")),\n path(\"api/\", include(\"apps.api.urls\"))\n]\n","repo_name":"ksradau/Resume_Project","sub_path":"src/project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17725935418","text":"import os\n\nimport numpy as np\nimport torch\nfrom 
torch.utils.data.dataset import Subset\nfrom torchvision import datasets, transforms\n\nfrom utils.utils import set_random_seed\n\nDATA_PATH = './data/'\nIMAGENET_PATH = './data/ImageNet'\n\nCIFAR10_SUPERCLASS = list(range(10)) # one class\nCIFAR100_CORUPTION_SUPERCLASS = list(range(20)) # one class\n\nIMAGENET_SUPERCLASS = list(range(30)) # one class\n\nIMAGENET30_SUPERCLASS = list(range(2))\n\nCIFAR100_SUPERCLASS = [\n [4, 31, 55, 72, 95],\n [1, 33, 67, 73, 91],\n [54, 62, 70, 82, 92],\n [9, 10, 16, 29, 61],\n [0, 51, 53, 57, 83],\n [22, 25, 40, 86, 87],\n [5, 20, 26, 84, 94],\n [6, 7, 14, 18, 24],\n [3, 42, 43, 88, 97],\n [12, 17, 38, 68, 76],\n [23, 34, 49, 60, 71],\n [15, 19, 21, 32, 39],\n [35, 63, 64, 66, 75],\n [27, 45, 77, 79, 99],\n [2, 11, 36, 46, 98],\n [28, 30, 44, 78, 93],\n [37, 50, 65, 74, 80],\n [47, 52, 56, 59, 96],\n [8, 13, 48, 58, 90],\n [41, 69, 81, 85, 89],\n]\n\n\n\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nfrom PIL import Image\n\n\ndef sparse2coarse(targets):\n coarse_labels = np.array(\n [4,1,14, 8, 0, 6, 7, 7, 18, 3, 3,\n 14, 9, 18, 7, 11, 3, 9, 7, 11, 6, 11, 5,\n 10, 7, 6, 13, 15, 3, 15, 0, 11, 1, 10,\n 12, 14, 16, 9, 11, 5, 5, 19, 8, 8, 15,\n 13, 14, 17, 18, 10, 16, 4, 17, 4, 2, 0,\n 17, 4, 18, 17, 10, 3, 2, 12, 12, 16, 12,\n 1, 9, 19, 2, 10, 0, 1, 16, 12, 9, 13,\n 15, 13, 16, 19, 2, 4, 6, 19, 5, 5, 8,\n 19, 18, 1, 2, 15, 6, 0, 17, 8, 14, 13,])\n return coarse_labels[targets]\n\nclass CIFAR_CORRUCPION(Dataset):\n def __init__(self, transform=None, normal_idx = [0], cifar_corruption_label = 'CIFAR-10-C/labels.npy', cifar_corruption_data = './CIFAR-10-C/defocus_blur.npy'):\n self.labels_10 = np.load(cifar_corruption_label)\n self.labels_10 = self.labels_10[:10000]\n if cifar_corruption_label == 'CIFAR-100-C/labels.npy':\n self.labels_10 = sparse2coarse(self.labels_10)\n self.data = np.load(cifar_corruption_data)\n self.data = self.data[:10000]\n self.transform = transform\n \n def __getitem__(self, index):\n x = self.data[index]\n y = self.labels_10[index]\n if self.transform:\n x = Image.fromarray((x * 255).astype(np.uint8))\n x = self.transform(x) \n return x, y\n \n def __len__(self):\n return len(self.data)\n\nclass MultiDataTransform(object):\n def __init__(self, transform):\n self.transform1 = transform\n self.transform2 = transform\n\n def __call__(self, sample):\n x1 = self.transform1(sample)\n x2 = self.transform2(sample)\n return x1, x2\n\n\nclass MultiDataTransformList(object):\n def __init__(self, transform, clean_trasform, sample_num):\n self.transform = transform\n self.clean_transform = clean_trasform\n self.sample_num = sample_num\n\n def __call__(self, sample):\n set_random_seed(0)\n\n sample_list = []\n for i in range(self.sample_num):\n sample_list.append(self.transform(sample))\n\n return sample_list, self.clean_transform(sample)\n\n\ndef get_transform(image_size=None):\n # Note: data augmentation is implemented in the layers\n # Hence, we only define the identity transformation here\n if image_size: # use pre-specified image size\n train_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n test_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.ToTensor(),\n ])\n else: # use default image size\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n test_transform = transforms.ToTensor()\n\n return 
train_transform, test_transform\n\n\ndef get_subset_with_len(dataset, length, shuffle=False):\n set_random_seed(0)\n dataset_size = len(dataset)\n\n index = np.arange(dataset_size)\n if shuffle:\n np.random.shuffle(index)\n\n index = torch.from_numpy(index[0:length])\n subset = Subset(dataset, index)\n\n assert len(subset) == length\n\n return subset\n\n\ndef get_transform_imagenet():\n train_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n test_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ])\n\n train_transform = MultiDataTransform(train_transform)\n\n return train_transform, test_transform\n\n\ndef get_dataset(P, dataset, test_only=False, image_size=None, download=False, eval=False):\n download = True\n if dataset in ['imagenet', 'cub', 'stanford_dogs', 'flowers102',\n 'places365', 'food_101', 'caltech_256', 'dtd', 'pets']:\n if eval:\n train_transform, test_transform = get_simclr_eval_transform_imagenet(P.ood_samples,\n P.resize_factor, P.resize_fix)\n else:\n train_transform, test_transform = get_transform_imagenet()\n else:\n train_transform, test_transform = get_transform(image_size=image_size)\n\n if dataset == 'cifar10':\n image_size = (32, 32, 3)\n n_classes = 10\n train_set = datasets.CIFAR10(DATA_PATH, train=True, download=download, transform=train_transform)\n test_set = datasets.CIFAR10(DATA_PATH, train=False, download=download, transform=test_transform)\n elif dataset == 'svhn':\n image_size = (32, 32, 3)\n n_classes = 10\n train_set = datasets.SVHN(DATA_PATH, split='train', download=download, transform=test_transform)\n test_set = datasets.SVHN(DATA_PATH, split='test', download=download, transform=test_transform)\n elif dataset == 'svhn-10':\n image_size = (32, 32, 3)\n n_classes = 10\n transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.ToTensor(),\n ])\n train_set = datasets.SVHN(DATA_PATH, split='train', download=download, transform=transform)\n test_set = datasets.SVHN(DATA_PATH, split='test', download=download, transform=transform)\n print(\"train_set shapes: \", train_set[0][0].shape)\n print(\"test_set shapes: \", test_set[0][0].shape)\n \n elif dataset == 'svhn-10-corruption':\n image_size = (32, 32, 3)\n def gaussian_noise(image, mean=P.noise_mean, std = P.noise_std, noise_scale = P.noise_scale):\n image = image + (torch.randn(image.size()) * std + mean)*noise_scale\n return image\n\n n_classes = 10\n train_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.ToTensor(),\n ])\n test_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.ToTensor(),\n transforms.Lambda(gaussian_noise)\n ])\n\n train_set = datasets.SVHN(DATA_PATH, split='train', download=download, transform=train_transform)\n test_set = datasets.SVHN(DATA_PATH, split='test', download=download, transform=test_transform)\n print(\"train_set shapes: \", train_set[0][0].shape)\n print(\"test_set shapes: \", test_set[0][0].shape)\n elif dataset == 'mnist':\n n_classes = 10\n train_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.Grayscale(num_output_channels=3),\n transforms.ToTensor(),\n ])\n test_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.Grayscale(num_output_channels=3),\n 
transforms.ToTensor(),\n ])\n train_set = datasets.MNIST(DATA_PATH, train=True, download=download, transform=train_transform)\n test_set = datasets.MNIST(DATA_PATH, train=False, download=download, transform=test_transform)\n print(\"train_set shapes: \", train_set[0][0].shape)\n print(\"test_set shapes: \", test_set[0][0].shape)\n elif dataset == 'imagenet30':\n n_classes = 2\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n anomaly_testset = datasets.ImageFolder('./one_class_test', transform=transform)\n for i in range(len(anomaly_testset)):\n anomaly_testset.targets[i] = 1\n anomaly_trainset = datasets.ImageFolder('./one_class_train', transform=transform)\n for i in range(len(anomaly_trainset)):\n anomaly_trainset.targets[i] = 1\n test_set = anomaly_testset\n train_set = anomaly_trainset\n elif dataset == 'fashion-mnist':\n # image_size = (32, 32, 3)\n n_classes = 10\n train_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.Grayscale(num_output_channels=3),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n test_transform = transforms.Compose([\n transforms.Resize((image_size[0], image_size[1])),\n transforms.Grayscale(num_output_channels=3),\n transforms.ToTensor(),\n ])\n train_set = datasets.FashionMNIST(DATA_PATH, train=True, download=download, transform=train_transform)\n test_set = datasets.FashionMNIST(DATA_PATH, train=False, download=download, transform=test_transform)\n print(\"train_set shapes: \", train_set[0][0].shape)\n print(\"test_set shapes: \", test_set[0][0].shape)\n elif dataset == 'cifar100':\n image_size = (32, 32, 3)\n n_classes = 100\n train_set = datasets.CIFAR100(DATA_PATH, train=True, download=download, transform=train_transform)\n test_set = datasets.CIFAR100(DATA_PATH, train=False, download=download, transform=test_transform)\n elif dataset=='cifar10-corruption':\n n_classes = 10\n transform = transforms.Compose([\n transforms.Resize(32),\n transforms.ToTensor(),\n ])\n test_set = CIFAR_CORRUCPION(transform=transform, cifar_corruption_data=P.cifar_corruption_data)\n train_set = datasets.CIFAR10(DATA_PATH, train=True, download=download, transform=transform)\n print(\"train_set shapes: \", train_set[0][0].shape)\n print(\"test_set shapes: \", test_set[0][0].shape)\n \n elif dataset=='cifar100-corruption':\n n_classes = 100\n transform = transforms.Compose([\n transforms.Resize(32),\n transforms.ToTensor(),\n ])\n test_set = CIFAR_CORRUCPION(transform=transform, cifar_corruption_label='CIFAR-100-C/labels.npy', cifar_corruption_data=P.cifar_corruption_data)\n train_set = datasets.CIFAR100(DATA_PATH, train=True, download=download, transform=transform)\n \n train_set.targets = sparse2coarse(train_set.targets)\n\n print(\"train_set shapes: \", train_set[0][0].shape)\n print(\"test_set shapes: \", test_set[0][0].shape)\n \n elif dataset == 'svhn':\n assert test_only and image_size is not None\n test_set = datasets.SVHN(DATA_PATH, split='test', download=download, transform=test_transform)\n\n elif dataset == 'lsun_resize':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'LSUN_resize')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'lsun_pil' or dataset == 'lsun_fix':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'LSUN_fix')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'imagenet_resize':\n assert 
test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'Imagenet_resize')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'imagenet_pil' or dataset == 'imagenet_fix':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'Imagenet_fix')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'imagenet':\n image_size = (224, 224, 3)\n n_classes = 30\n train_dir = os.path.join(IMAGENET_PATH, 'one_class_train')\n test_dir = os.path.join(IMAGENET_PATH, 'one_class_test')\n train_set = datasets.ImageFolder(train_dir, transform=train_transform)\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n\n elif dataset == 'stanford_dogs':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'stanford_dogs')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'cub':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'cub200')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'flowers102':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'flowers102')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'places365':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'places365')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'food_101':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'food-101', 'images')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'caltech_256':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'caltech-256')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'dtd':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'dtd', 'images')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n elif dataset == 'pets':\n assert test_only and image_size is not None\n test_dir = os.path.join(DATA_PATH, 'pets')\n test_set = datasets.ImageFolder(test_dir, transform=test_transform)\n test_set = get_subset_with_len(test_set, length=3000, shuffle=True)\n\n else:\n raise NotImplementedError()\n\n if test_only:\n return test_set\n else:\n return train_set, test_set, image_size, n_classes\n\n\ndef get_superclass_list(dataset):\n if dataset == 'cifar10' or dataset=='cifar10-corruption' or dataset=='svhn' or dataset=='svhn-10-corruption' or dataset=='svhn-10' or dataset=='fashion-mnist' or dataset=='mnist':\n return CIFAR10_SUPERCLASS\n elif dataset == 'cifar100':\n return CIFAR100_SUPERCLASS\n elif dataset == 'imagenet30':\n return IMAGENET30_SUPERCLASS\n elif dataset == \"cifar100-corruption\":\n return CIFAR100_CORUPTION_SUPERCLASS\n elif dataset == 'imagenet':\n return IMAGENET_SUPERCLASS\n else:\n raise 
NotImplementedError()\n\n\ndef get_subclass_dataset(dataset, classes):\n    if not isinstance(classes, list):\n        classes = [classes]\n\n    indices = []\n    try:\n        for idx, tgt in enumerate(dataset.targets):\n            if tgt in classes:\n                indices.append(idx)\n    except AttributeError:\n        # SVHN has no .targets attribute, so iterate over the samples instead\n        for idx, (_, tgt) in enumerate(dataset):\n            if tgt in classes:\n                indices.append(idx)\n\n    dataset = Subset(dataset, indices)\n    return dataset\n\n\ndef get_simclr_eval_transform_imagenet(sample_num, resize_factor, resize_fix):\n    resize_scale = (resize_factor, 1.0)  # resize scaling factor\n    if resize_fix:  # if resize_fix is True, use same scale\n        resize_scale = (resize_factor, resize_factor)\n\n    transform = transforms.Compose([\n        transforms.Resize(256),\n        transforms.RandomResizedCrop(224, scale=resize_scale),\n        transforms.RandomHorizontalFlip(),\n        transforms.ToTensor(),\n    ])\n\n    clean_transform = transforms.Compose([\n        transforms.Resize(256),\n        transforms.CenterCrop(224),\n        transforms.ToTensor(),\n    ])\n\n    transform = MultiDataTransformList(transform, clean_transform, sample_num)\n\n    return transform, transform\n","repo_name":"mojtaba-nafez/CSI-corruption-evaluation","sub_path":"datasets/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":17232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38924049927","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\ndef batchify_data(data, batch_size, device):\n    ''' Separates samples into batches per minute '''\n    N = data.shape[2]\n    # Samples start with silence\n    header = np.zeros((data.shape[0], data.shape[1], 1))\n    batches = []\n    for i in range(0, N, batch_size):\n        batches.append({\n            'x': torch.tensor(np.append(header, data[:,:,i:i+batch_size - 1], axis=2), dtype=torch.float32).to(device),\n            'y': torch.tensor(data[:,:,i:i+batch_size], dtype=torch.float32).to(device)\n        })\n    return batches\n\ndef compute_accuracy(predictions, y, device):\n    ''' Computes the cosine similarity of predictions '''\n    cos = nn.CosineSimilarity(dim=1, eps=1e-6).to(device)\n    return torch.mean(cos(predictions, y))\n\ndef repackage_hidden(hidden):\n    ''' Wraps hidden states in new Variables, to detach them from their history. 
'''\n    hidden = tuple([e.data for e in hidden])\n    return hidden","repo_name":"DavidContrerasFranco/AI-Soundscape","sub_path":"AI/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22654666217","text":"from facenet_pytorch import MTCNN, InceptionResnetV1\nimport torch\nfrom torchvision import datasets\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\nimport cv2 as cv\n\ndata_path = 'data.pt'\n\ndef face_match(img_path): # img_path = image as a numpy array, data_path = location of data.pt\n    # getting embedding matrix of the given img\n    #img = Image.open('b.jpg')\n    img = Image.fromarray(img_path) # build the PIL image straight from the array, no file read\n    face, prob = mtcnn(img, return_prob=True) # returns cropped face and probability\n    if prob is None:\n        return ('no face', '?')\n    emb = resnet(face.unsqueeze(0)).detach() # detach so no gradient is required\n\n    saved_data = torch.load(data_path) # loading data.pt file\n    embedding_list = saved_data[0] # getting embedding data\n    name_list = saved_data[1] # getting list of names\n    dist_list = [] # list of matched distances, minimum distance is used to identify the person\n\n    for idx, emb_db in enumerate(embedding_list):\n        dist = torch.dist(emb, emb_db).item()\n        dist_list.append(dist)\n\n    min_dist_list = min(dist_list)\n    if min_dist_list > 1.0:\n        return ('unknown', '?')\n    idx_min = dist_list.index(min_dist_list)\n    return (name_list[idx_min], min_dist_list)\n\ndef collate_fn(x):\n    return x[0]\n\ndef update_data():\n    dataset = datasets.ImageFolder('photos') # photos folder path\n    idx_to_class = {i: c for c, i in dataset.class_to_idx.items()} # accessing names of people from folder names\n\n    loader = DataLoader(dataset, collate_fn=collate_fn)\n\n    #face_list = [] # list of cropped faces from photos folder\n    name_list = [] # list of names corresponding to cropped photos\n    embedding_list = [] # list of embedding matrices after conversion from cropped faces using resnet\n    for img, idx in loader:\n        face, prob = mtcnn(img, return_prob=True)\n        if face is not None and prob > 0.90: # if face detected and probability > 90%\n            emb = resnet(face.unsqueeze(0)) # passing cropped face into resnet model to get embedding matrix\n            embedding_list.append(emb.detach()) # resulting embedding matrix is stored in a list\n            name_list.append(idx_to_class[idx]) # names are stored in a list\n\n    data = [embedding_list, name_list]\n    torch.save(data, data_path) # saving data.pt file\n\nmtcnn = MTCNN(margin=0, min_face_size=20) # initializing mtcnn for face detection\nresnet = InceptionResnetV1(pretrained='vggface2').eval() # initializing resnet for face img to embedding conversion\n#resnet.classify = True\n\ncap = cv.VideoCapture(0)\nret, im = cap.read()\ny, x, _ = im.shape\ny, x = y//2, x//2\ny1, y2, x1, x2 = y - 180, y + 180, x - 130, x + 130\n\n'''\n________________________________\n'''\nupdate_data()\n'''\n________________________________\n'''\n\n#import time\n#start_time = time.time()\nwhile True:\n    ret, image = cap.read()\n    image_crop = image[y1:y2, x1:x2, :]\n\n    result = face_match(image_crop)\n\n    #image = cv.cvtColor(image, cv.COLOR_BGR2GRAY) # this actually turned out even slower, or maybe I measured it wrong lol\n    cv.putText(image, f'Face matched with: {result[0]} With distance: {result[1]}',(30,30),cv.FONT_HERSHEY_DUPLEX, 0.5, (0,0,0))\n    cv.rectangle(image, (x1, y1), (x2, y2), (255, 255, 255), 2)\n    cv.imshow('me', image)\n    key = cv.waitKey(1)\n    if key == ord('q'):\r\n
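        # 'q' ends the preview loop; cap.release() is never called, so the camera\r\n        # is only freed when the process exits\r\n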
        break\r\n    if key == ord(' '):\r\n        cv.waitKey()\r\n#print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\ncv.destroyAllWindows()","repo_name":"mkgs210/face_recognition","sub_path":"v_0.0.py","file_name":"v_0.0.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6965009878","text":"import os\n\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\nfrom settings import (\n    APPLICATION_NAME, CLIENT_SECRET_FILE, SCOPES\n)\nimport argparse\n\n\nif __name__ == \"__main__\":\n    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\n\n\n\ndef get_credentials():\n    \"\"\"Gets valid user credentials from storage.\n\n    If nothing has been stored, or if the stored credentials are invalid,\n    the OAuth2 flow is completed to obtain the new credentials.\n\n    Returns:\n        Credentials, the obtained credential.\n    \"\"\"\n    home_dir = os.path.expanduser('~')\n    credential_dir = os.path.join(home_dir, '.credentials')\n    if not os.path.exists(credential_dir):\n        os.makedirs(credential_dir)\n    credential_path = os.path.join(credential_dir, 'sheets.pocketmoney.json')\n\n    store = Storage(credential_path)\n    credentials = store.get()\n    if not credentials or credentials.invalid:\n        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n        flow.user_agent = APPLICATION_NAME\n        if flags:\n            credentials = tools.run_flow(flow, store, flags)\n        print('Storing credentials to ' + credential_path)\n    return credentials\n","repo_name":"rebkwok/pocketmoney","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"26340391231","text":"import pandas as pd\nimport numpy as np\nimport astropy.units as u\nimport astropy.constants as const\n\nfrom heliopy.data import helios\nimport helioshelp\nimport pathlib\n\n\ndef load_data(fitdir):\n    fitdir = pathlib.Path(fitdir)\n    dates = pd.read_csv('stream_times.csv', parse_dates=[1, 2])\n    protons, alphas = [], []\n    for _, row in dates.iterrows():\n        # Import protons\n        protons.append(helios.corefit(row['Probe'],\n                                      row['Start'],\n                                      row['End']).data)\n\n        probe = row['Probe']\n        protons[-1]['Probe'] = probe\n\n        year = row['Start'].strftime('%Y')\n        startdoy = int(row['Start'].strftime('%j'))\n        enddoy = int(row['End'].strftime('%j'))\n        # Import alphas\n        this_alphas = []\n        for doy in range(startdoy, enddoy + 1):\n            this_alphas.append(pd.read_csv(\n                fitdir /\n                'helios{}'.format(probe) / '{}'.format(year) /\n                'h{}_{}_{:03d}_alpha_fits.csv'.format(probe, year, doy),\n                index_col=0, parse_dates=[0]))\n            this_alphas[-1]['Probe'] = probe\n        this_alphas = pd.concat(this_alphas)\n\n        this_alphas = this_alphas[this_alphas.index > row['Start']]\n        this_alphas = this_alphas[this_alphas.index < row['End']]\n\n        print(this_alphas.index.min(), this_alphas.index.max(),\n              protons[-1]['r_sun'].min(),\n              protons[-1]['r_sun'].max())\n        alphas.append(this_alphas)\n\n    protons = pd.concat(protons)\n    alphas = pd.concat(alphas)\n    protons = helioshelp.calculate_derived(protons)\n\n    def reindex(probe):\n        this_p = protons[protons['Probe'] == probe]\n        this_a = alphas[alphas['Probe'] == probe]\n        return this_p.reindex(index=this_a.index)\n\n    protons = pd.concat([reindex(probe) for probe in [1, 2]])\n    alphas['r_sun'] = protons['r_sun']\n\n    return protons, alphas\n\n\ndef par_energy_density(n, T):\n    eps = 0.5 * (n.values * u.cm**-3) * const.k_B 
* (T.values * u.K)\n return eps.to(u.J / u.m**3)\n\n\ndef perp_energy_density(n, T):\n eps = (n.values * u.cm**-3) * const.k_B * (T.values * u.K)\n return eps.to(u.J / u.m**3)\n\n\ndef calculate_derived(protons, alphas):\n alphas['Tani'] = alphas['Ta_perp'] / alphas['Ta_par']\n protons['Tani'] = protons['Tp_perp'] / protons['Tp_par']\n alphas['Tp_ani'] = protons['Tani']\n\n alphas['Ta_tot'] = (2 * alphas['Ta_perp'] + alphas['Ta_par']) / 3\n protons['Tp_tot'] = (2 * protons['Tp_perp'] + protons['Tp_par']) / 3\n\n alphas['Ta/Tp_perp'] = alphas['Ta_perp'] / protons['Tp_perp']\n alphas['Ta/Tp_par'] = alphas['Ta_par'] / protons['Tp_par']\n alphas['Ta/Tp_tot'] = alphas['Ta_tot'] / protons['Tp_tot']\n\n protons['|B|'] = np.linalg.norm(protons[['Bx', 'By', 'Bz']].values, axis=1)\n protons['Beta'] = helioshelp.beta(protons['n_p'], protons['Tp_par'], protons['|B|'])\n alphas['Beta'] = helioshelp.beta(alphas['n_a'], alphas['Ta_par'], protons['|B|'])\n\n alphas['abundance'] = alphas['n_a'] / protons['n_p']\n alphas['vth_par'] = helioshelp.temp2vth(alphas['Ta_par'].values, 4)\n alphas['vth_perp'] = helioshelp.temp2vth(alphas['Ta_perp'].values, 4)\n alphas['|v|'] = np.linalg.norm(alphas[['va_x', 'va_y', 'va_z']].values, axis=1)\n\n for comp in ['x', 'y', 'z']:\n alphas['drift_' + comp] = alphas['va_'+ comp] - protons['vp_' + comp]\n alphas['|drift|'] = np.linalg.norm(alphas[['drift_x', 'drift_y', 'drift_z']].values, axis=1)\n\n for pkey in ['Tp_perp', 'Tp_par']:\n alphas[pkey] = protons[pkey]\n\n alphas['eps_p_par'] = par_energy_density(protons['n_p'], protons['Tp_par']).value\n alphas['eps_a_par'] = par_energy_density(alphas['n_a'], alphas['Ta_par']).value\n alphas['eps_p_perp'] = perp_energy_density(protons['n_p'], protons['Tp_perp']).value\n alphas['eps_a_perp'] = perp_energy_density(alphas['n_a'], alphas['Ta_perp']).value\n\n return protons, alphas\n","repo_name":"dstansby/publication-code","sub_path":"2018-helios-alphas/library/local_helpers.py","file_name":"local_helpers.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"31763191919","text":"def addTwoNumbers(self, l1, l2):\n res = ListNode(0)\n r, c = res, 0\n while l1 and l2:\n s = l1.val + l2.val + c\n r.next, c = ListNode(s%10), s//10\n l1, l2, r = l1.next, l2.next, r.next\n\n ll = l1 if l1 else l2\n while ll: # the lengths of l1 and l2 are not equal\n s = ll.val + c\n r.next, c = ListNode(s%10), s//10\n ll, r = ll.next, r.next\n \n if c == 1: # important! 
c might be 1 after the addition of l1 and l2\n r.next = ListNode(1)\n return res.next","repo_name":"minuso/leetcode","sub_path":"0002/addTwoNumbers.py","file_name":"addTwoNumbers.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12285762717","text":"import math\nimport random\nimport time\nimport typing\n\nimport numpy\n\n\nclass BrainWrinkle:\n def __init__(\n self, input_size: int, output_size: int, function: str = \"relu\"\n ) -> None:\n \"\"\"Store all required layer data so it can be accessed easily.\"\"\"\n self.input = input_size\n self.output = output_size\n self.function = function\n\n @staticmethod\n def relu(inputs: numpy.array) -> numpy.array:\n \"\"\"An activation function for all hidden layers that returns only positive values or zero.\"\"\"\n return numpy.maximum(0, inputs)\n\n @staticmethod\n def softmax(inputs: numpy.array):\n \"\"\"An activation function for the output layer to get the most probable direction to move.\"\"\"\n inputs = inputs.astype(numpy.float64)\n try:\n return numpy.exp(inputs) / numpy.sum(numpy.exp(inputs))\n except Exception as err:\n print(inputs)\n print(type(inputs))\n raise (err)\n\n def think(\n self, inputs: numpy.array, weights: numpy.array, bias: numpy.array\n ) -> numpy.array:\n \"\"\"Thinking is hard, especially with loops, so use a dot product. This is the bit that would run on\n a GPU if I knew how to do that.\"\"\"\n thought = numpy.dot(inputs, weights)\n thought = thought + bias\n return self.relu(thought) if self.function == \"relu\" else self.softmax(thought)\n\n\nclass Brain:\n def __init__(self) -> None:\n \"\"\"What does a network look like? Does it look like a bitch? O_o\"\"\"\n self.wrinkles = []\n self.weights = []\n\n def add_layer(self, wrinkle: BrainWrinkle) -> None:\n \"\"\"Who needs a well defined path finding algorith when I can just add more layers?\"\"\"\n self.wrinkles.append(wrinkle)\n return self\n\n def build(self) -> None:\n \"\"\"Did I seriously just use a builder pattern?\"\"\"\n numpy.random.seed(int(time.time()))\n for wrinkle in self.wrinkles:\n self.weights.append(\n {\n \"W\": numpy.random.uniform(\n -1.0, 1.0, size=(wrinkle.input, wrinkle.output)\n ),\n \"B\": numpy.zeros((1, wrinkle.output)),\n }\n )\n return self\n\n @staticmethod\n def _translate_thought(thought: numpy.array) -> str:\n \"\"\"Reality can be what ever I make it.\"\"\"\n max_index = thought.argmax()\n if max_index == 0:\n return \"R\"\n elif max_index == 1:\n return \"D\"\n elif max_index == 2:\n return \"L\"\n elif max_index == 3:\n return \"U\"\n else:\n raise Exception(\"somethings wrong I can feel it\")\n\n def think(self, data: numpy.array):\n \"\"\"Just because you have the ability to think doesn't make you intelligent...\"\"\"\n result = data\n\n for i in range(len(self.wrinkles)):\n input = result\n result = self.wrinkles[i].think(\n input, self.weights[i][\"W\"], self.weights[i][\"B\"]\n )\n\n return self._translate_thought(result)\n","repo_name":"Chippers255/aoc_2022","sub_path":"day_12/failure/brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32985762038","text":"import json\n\nfrom git import GitCommandError, Repo\n\n\nclass RepoStore:\n\n def __init__(self, url: str, dir_location: str):\n self.link = url\n self.dir_location = dir_location\n self.package_location = dir_location + 
'/package.json'\n\n def clone_repo(self):\n try:\n # print(\"Cloning...\")\n Repo.clone_from(self.link, self.dir_location)\n except GitCommandError:\n pass\n # print(\"Repository already cloned!\")\n\n def get_dependencies(self) -> dict:\n try:\n with open(self.package_location, 'r') as json_file:\n data = json.load(json_file)\n try:\n data.get('dependencies').pop('github', None)\n except AttributeError:\n print(\"Module has no dependencies listed!\")\n return {}\n return data.get('dependencies')\n except FileNotFoundError:\n print(\"No package.json file found! Please use clone_repo() first.\")\n return {}\n","repo_name":"Purdue-ECE-461/project-2-18","sub_path":"p18website/ranking_modules/repo_store.py","file_name":"repo_store.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6977714839","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nimport stdlib\nfrom stdlib.template import basic\nfrom stdlib.manifest import manifest\n\n\n@manifest(\n name='dejavu',\n category='ui-resources',\n description='''\n A font family based on the Vera Fonts.\n ''',\n tags=['font', 'vera', 'unicode', 'ttf', 'otf'],\n maintainer='grange_c@raven-os.org',\n licenses=[stdlib.license.License.CUSTOM],\n upstream_url='https://dejavu-fonts.github.io/',\n kind=stdlib.kind.Kind.EFFECTIVE,\n versions_data=[\n {\n 'semver': '2.37.0',\n 'fetch': [{\n 'url': 'https://sourceforge.net/projects/dejavu/files/dejavu/2.37/dejavu-fonts-ttf-2.37.tar.bz2',\n 'sha256': 'fa9ca4d13871dd122f61258a80d01751d603b4d3ee14095d65453b4e846e17d7',\n }],\n },\n ],\n)\ndef build(build):\n packages = basic.build()\n\n packages['ui-resources/dejavu'].drain_build_cache('ttf/*.ttf', 'usr/share/fonts/dejavu/')\n\n return packages\n","repo_name":"raven-os/nbuild-manifests","sub_path":"ui-resources/dejavu.py","file_name":"dejavu.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13363201963","text":"# -*- coding: utf-8 -*-\n\"\"\" AcademyTrainingActivity\n\nThis module extends the academy.training.activity Odoo model\n\"\"\"\n\nfrom odoo import models, fields, api\n\nfrom logging import getLogger\n\n_logger = getLogger(__name__)\n\n\nclass AcademyTrainingActivity(models.Model):\n \"\"\" Extends model adding a many2many field to link tests to actions\n \"\"\"\n\n _inherit = 'academy.training.activity'\n\n available_time = fields.Float(\n string='Default time',\n required=False,\n readonly=False,\n index=False,\n default=0.5,\n digits=(16, 2),\n help=('Default available time to complete exercises. 
This value will '\n 'be used to create new templates')\n )\n\n correction_scale_id = fields.Many2one(\n string='Default correction scale',\n required=False,\n readonly=False,\n index=False,\n default=None,\n comodel_name='academy.tests.correction.scale',\n domain=[],\n context={},\n ondelete='cascade',\n auto_join=False,\n help=('Choose the default correction scale will be used on the new ',\n 'created templates')\n )\n\n assignment_ids = fields.One2many(\n string='Test assignments',\n required=False,\n readonly=False,\n index=True,\n default=None,\n comodel_name='academy.tests.test.training.assignment',\n inverse_name='training_activity_id',\n domain=[],\n context={},\n auto_join=False,\n limit=None,\n help=('List of test assignments that have been created for this '\n 'training action enrollment')\n )\n\n assignment_count = fields.Integer(\n string='Nº assignments',\n required=False,\n readonly=True,\n index=False,\n default=0,\n store=False,\n compute='_compute_assignment_count',\n help=('Show the number of test assignments that have been created for'\n 'this training action enrollment')\n )\n\n @api.depends('assignment_ids')\n def _compute_assignment_count(self):\n for record in self:\n record.assignment_count = \\\n len(record.assignment_ids)\n\n template_ids = fields.One2many(\n string='Templates',\n required=False,\n readonly=False,\n index=True,\n default=None,\n comodel_name='academy.tests.random.template',\n inverse_name='training_activity_id',\n domain=[],\n context={},\n auto_join=False,\n limit=None,\n help=('List of test templates available to be used in this training '\n 'action enrollment')\n )\n\n template_count = fields.Integer(\n string='Nº templates',\n required=False,\n readonly=True,\n index=False,\n default=0,\n store=False,\n compute='_compute_template_count',\n help=('Show the number of test templates available to be used in this '\n 'training action enrollment')\n )\n\n @api.depends('template_ids')\n def _compute_template_count(self):\n for record in self:\n record.template_count = len(record.template_ids)\n\n available_question_ids = fields.Many2manyView(\n string='Available questions',\n required=False,\n readonly=True,\n index=False,\n default=None,\n help='Show questions available in the module',\n comodel_name='academy.tests.question',\n relation='academy_tests_question_training_activity_rel',\n column1='training_activity_id',\n column2='question_id',\n domain=[],\n context={},\n limit=None,\n copy=False\n )\n\n def create_test_template(self, no_open=False):\n template_obj = self.env['academy.tests.random.template']\n module_obj = self.env['academy.training.module']\n\n values = module_obj.get_template_values(\n self.competency_unit_ids, name=self.name)\n\n template = template_obj.create(values)\n\n if not no_open and template:\n return module_obj._template_act_window(template)\n","repo_name":"sotogarcia/odoo-academy","sub_path":"modules/academy_tests/models/academy_training_activity.py","file_name":"academy_training_activity.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30063041977","text":"import os\n\n\ncore_500_root = 'G:/MachineLearning/unbalance/core_500'\ncoreless_500_root = 'G:/MachineLearning/unbalance/coreless_5000'\ntest_root = 'G:/MachineLearning/unbalance/test_data1'\n\n\ndef get_train_list():\n pathList = [core_500_root, coreless_500_root, test_root]\n for _path in pathList:\n nameListPath = os.path.join(_path, 'nameList.txt')\n nameList 
= open(nameListPath, 'w')\n annoPath = os.path.join(_path, 'annotation')\n for fileName in os.listdir(annoPath):\n nameList.write(fileName.split(\".\")[0]+'\\n')\n nameList.close()\n\ndef get_test_list(_path):\n nameListPath = os.path.join(_path, 'core_coreless_test.txt')\n nameList = open(nameListPath, 'w')\n imgPath = os.path.join(_path, 'Image_test')\n for fileName in os.listdir(imgPath):\n nameList.write(fileName.split(\".\")[0]+'\\n')\n nameList.close()\n\nif __name__ == '__main__':\n #get_train_list()\n get_test_list('G:/MachineLearning/unbalance/test_data2')","repo_name":"wyc869609651/ssd-dection","sub_path":"utils/image_name_util.py","file_name":"image_name_util.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41287112611","text":"import base64\nimport os\nimport ssl\nimport tempfile\nimport warnings\nfrom collections import namedtuple\n\nimport aiohttp\nimport google.auth.transport.requests\nimport requests\nimport yaml\n\nConfig = namedtuple('Config', 'url token ca_cert client_cert')\nClientCert = namedtuple('ClientCert', 'crt key')\n\n# Location of service account tokens inside a Pod.\nFNAME_TOKEN = \"/var/run/secrets/kubernetes.io/serviceaccount/token\"\nFNAME_CERT = \"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\"\n\n\ndef load_incluster_config(fname_token=FNAME_TOKEN, fname_cert=FNAME_CERT):\n \"\"\"Return K8s access config from Pod service account.\n\n Returns None if we are not running in a Pod.\n\n Inputs:\n kubconfig: str\n Name of kubeconfig file.\n Returns:\n Config\n\n \"\"\"\n # Every K8s pod has this.\n server_ip = os.getenv('KUBERNETES_PORT_443_TCP_ADDR', None)\n\n # Sanity checks: URL and service account files must exist or we are not\n # inside a Pod.\n try:\n assert server_ip is not None\n assert os.path.exists(fname_token)\n assert os.path.exists(fname_cert)\n except AssertionError:\n return None\n\n # Return the compiled service account configuration.\n try:\n conf = Config(\n url=f'https://{server_ip}',\n token=open(fname_token, 'r').read(),\n ca_cert=fname_cert,\n client_cert=None,\n )\n return conf\n except FileNotFoundError:\n return None\n\n\ndef load_gke_config(kubeconfig, disable_warnings=False):\n \"\"\"Return K8s access config for GKE cluster described in `kubeconfig`.\n\n Returns None if `kubeconfig` does not exist or could not be parsed.\n\n Inputs:\n kubconfig: str\n Name of kubeconfig file.\n Returns:\n Config\n\n \"\"\"\n # Load `kubeconfig`. For this proof-of-concept we assume it contains\n # exactly one cluster and user.\n try:\n kubeconf = yaml.load(open(kubeconfig))\n except FileNotFoundError:\n return None\n assert len(kubeconf['clusters']) == 1\n assert len(kubeconf['users']) == 1\n\n # Unpack the user and cluster info.\n cluster = kubeconf['clusters'][0]['cluster']\n user = kubeconf['users'][0]\n\n # Return immediately if this does not look like a config file for GKE.\n try:\n assert user['user']['auth-provider']['name'] == 'gcp'\n except (AssertionError, KeyError):\n return None\n\n # Unpack the self signed certificate (Google does not register the K8s API\n # server certificate with a public CA).\n ssl_ca_cert_data = base64.b64decode(cluster['certificate-authority-data'])\n\n # Save the certificate to a temporary file. 
This is only necessary because\n # the requests library needs a path to the CA file - unfortunately, we\n # cannot just pass it the content.\n _, ssl_ca_cert = tempfile.mkstemp(text=False)\n with open(ssl_ca_cert, 'wb') as fd:\n fd.write(ssl_ca_cert_data)\n\n # Authenticate with Compute Engine using the default project.\n with warnings.catch_warnings(record=disable_warnings):\n cred, project_id = google.auth.default(\n scopes=['https://www.googleapis.com/auth/cloud-platform']\n )\n cred.refresh(google.auth.transport.requests.Request())\n\n # Return the config data.\n return Config(\n url=cluster['server'],\n token=cred.token,\n ca_cert=ssl_ca_cert,\n client_cert=None,\n )\n\n\ndef load_minikube_config(kubeconfig):\n # Load `kubeconfig`. For this proof-of-concept we assume it contains\n # exactly one cluster and user.\n kubeconf = yaml.load(open(kubeconfig))\n assert len(kubeconf['clusters']) == 1\n assert len(kubeconf['users']) == 1\n\n # Unpack the user and cluster info.\n cluster = kubeconf['clusters'][0]\n user = kubeconf['users'][0]\n\n # Do not proceed if this does not look like a Minikube cluster.\n # Return immediately if this does not look like a config file for GKE.\n try:\n assert cluster['name'] == 'minikube'\n except (AssertionError, KeyError):\n return None\n\n # Minikube uses client certificates to authenticate. We need to pass those\n # to the HTTP client of our choice when we create the session.\n client_cert = ClientCert(\n crt=user['user']['client-certificate'],\n key=user['user']['client-key'],\n )\n\n # Return the config data.\n return Config(\n url=cluster['cluster']['server'],\n token=None,\n ca_cert=cluster['cluster']['certificate-authority'],\n client_cert=client_cert,\n )\n\n\ndef load_auto_config(kubeconfig):\n # Try the POD service account (returns None if we are not in pod).\n conf = load_incluster_config()\n if conf is not None:\n return conf\n\n # Load minikube configuration from kubeconfig file.\n conf = load_minikube_config(kubeconfig)\n if conf is not None:\n return conf\n\n # Load GKE configuration from kubeconfig file. Will also get us a new\n # bearer token from GCloud. 
The `disable_warnings` is set to True to avoid\n # harmless warnings during the live presentation.\n conf = load_gke_config(kubeconfig, disable_warnings=True)\n if conf is not None:\n return conf\n\n return None\n\n\ndef setup_requests(config: Config):\n # Configure a 'requests' session with the correct CA and pre-load the\n # Bearer token.\n sess = requests.Session()\n sess.verify = config.ca_cert\n\n if config.token is not None:\n sess.headers = {'authorization': f'Bearer {config.token}'}\n if config.client_cert is not None:\n sess.cert = (config.client_cert.crt, config.client_cert.key)\n return sess\n\n\ndef setup_aiohttp(config: Config):\n ssl_context = ssl.create_default_context(cafile=config.ca_cert)\n if config.client_cert is not None:\n ssl_context.load_cert_chain(\n certfile=config.client_cert.crt,\n keyfile=config.client_cert.key\n )\n\n sess = aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(ssl_context=ssl_context),\n headers={'authorization': f'Bearer {config.token}'},\n )\n return sess\n\n\nclass DotDict(dict):\n def __getattr__(self, key):\n return self[key]\n\n\ndef make_dotdict(data):\n if not isinstance(data, (list, tuple, dict)):\n return data\n\n # Recursively convert all elements in lists and dicts.\n if isinstance(data, (list, tuple)):\n return [make_dotdict(_) for _ in data]\n else:\n return DotDict({k: make_dotdict(v) for k, v in data.items()})\n","repo_name":"olitheolix/pyconau2018","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"13683532544","text":"import argparse\nfrom julia_rings.rings import ring_statistics\nfrom ase.io import read\nimport time\nimport numpy as np\nimport pickle\n\nparser = argparse.ArgumentParser(description=\"Ring Statistics Script\")\nparser.add_argument(\"input_file\", help=\"Input file in ASE-supported format\")\nparser.add_argument(\"-v\", \"--verbosity\", help=\"Verbosity level 0, 1\", default=0)\nparser.add_argument(\"-o\", \"--output\", help=\"Output file name\", default=\"rings_lists.json\")\nparser.add_argument(\"--maxpths\", help=\"Maximum number of paths to consider at each node\", default=1000)\nparser.add_argument(\"--maxlvl\", help=\"Rings of size up to maxlvl can be found\", default=12)\nparser.add_argument(\"-c\", \"--cutoff\", help=\"Cutoff for neighbour search. \"\n \"This accepts a global float. Adjust run_rings.py for more control\",\n default=2.2)\nparser.add_argument(\"--no-supercell\", help=\"Do not use supercells. 
Warning: same node may not appear twice in rings now\", action=\"store_true\")\nargs = parser.parse_args()\n\nv = int(args.verbosity)\n\nats = read(args.input_file, \"-1\")\n\nst = time.time()\nrs, rings = ring_statistics(ats, verbosity=v, mxpths=int(args.maxpths),\n outfile=args.output, maxlvl=int(args.maxlvl),\n cutoff=float(args.cutoff), no_supercell=args.no_supercell)\net = time.time()\nelapsed = et - st\nprint(f\"Elapsed time: {elapsed:.2f} seconds\")\n\nnp.savetxt('rings_stats.npy', rs)\nwith open('rings_lists.pkl', 'wb') as f:\n pickle.dump(rings, f)","repo_name":"MorrowChem/RingsStatisticsMatter.jl","sub_path":"run_rings.py","file_name":"run_rings.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"11150171151","text":"n = int(input())\nnumbers = list()\nfor _ in range(n):\n numbers.append(int(input()))\n\ndp = [1 for _ in range(n)]\n\nfor i in range(n):\n for j in range(i):\n if numbers[j] < numbers[i] and dp[j] >= dp[i]:\n dp[i] = dp[j] + 1\n\nmaxi = max(dp)\nprint(n - maxi)","repo_name":"twinklesu/algorithm_py","sub_path":"2631.py","file_name":"2631.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41391716145","text":"import pymongo\nfrom collections import Counter\n# import dateutil.parser\n\nfrom collections import Counter\nall_days = Counter()\n\nout_filename = \"exported_users.csv\"\nout_file = open(out_filename, \"w\")\nwrite_batchsize = 1024 # write in intervals\ninput_pcode = 34 # Only Istanbul users, use 0 if all users\n\nage_groups = [\"<=18\", \"19-29\", \"30-39\", \">=40\"]\n\nideology_1_labels = [\"turkish_nationalism\", \"conservatism\", \"islamism\", \"liberalism\", \"kemalism\"]\nideology_2_labels = [\"social_democracy\", \"socialism\", \"feminism\", \"environmentalism\",\n \"kurdish_national_movement\", \"secularism\"]\nideology_labels = [\"ide_\"+ide for ide in ideology_1_labels] + [\"ide_\"+ide for ide in ideology_2_labels]\n\nwelfare_labels = [\"social_policy\", \"labour_and_employment\", \"education\", \"health_and_public_health\",\n \"disability\", \"housing\"]\ndemocracy_labels = [\"elections_and_voting\", \"justice_system\", \"human_rights\", \"regime_and_constitution\",\n \"kurdish_question\"]\nbig5_labels = [\"internal_affairs\", \"national_defense\", \"corruption\", \"foreign_affairs\", \"economy\"]\nmunicipal_labels = [\"urban_public_infrastructure\", \"social_and_welfare_services\",\n \"environment_and_public_health\", \"housing\", \"animal_welfare\",\n \"local_politics\", \"culture\"]\nmunicipal_labels = [f\"municipal_{lab}\" for lab in municipal_labels]\ntopic_labels = welfare_labels + democracy_labels + big5_labels + municipal_labels\n\nemotion_labels = [\"notr\", \"mutluluk\", \"sevgi\", \"umut\", \"minnet\", \"saskinlik\", \"uzuntu\", \"kaygi\",\n \"korku\", \"umutsuzluk\", \"utanc\", \"pismanlik\", \"ofke\", \"igrenme\", \"arzu\",\n \"onaylama\", \"onaylamama\"]\n\nstance_labels = [\"pro\", \"against\", \"neutral\"]\n\negilim_labels = [\"irrelevant\", \"demand\", \"complaint\"]\n\nmunicipal_egilim_combinations = [f\"egilim_{egilim}-{lab}\" for egilim in egilim_labels[1:] for lab in municipal_labels]\n# topic_emotion_combinations = [f\"{emo}-{lab}\" for emo in emotion_labels for lab in topic_labels]\n# topic_stance_combinations = [f\"imam_{stance}-{lab}\" for stance in stance_labels for lab in topic_labels]\n\ntopic_emotion_combinations = 
[\"umut-social_policy\", \"umut-human_rights\", \"umut-economy\",\n \"umut-education\", \"umut-health_and_public_health\", \"umut-justice_system\",\n \"minnet-social_policy\", \"minnet-deprem\", \"uzuntu-deprem\", \"kaygi-deprem\",\n \"korku-social_policy\", \"korku-human_rights\", \"korku-economy\",\n \"korku-education\", \"korku-health_and_public_health\", \"korku-justice_system\",\n \"korku-deprem\", \"umutsuzluk-social_policy\", \"umutsuzluk-human_rights\",\n \"umutsuzluk-education\", \"umutsuzluk-health_and_public_health\",\n \"umutsuzluk-justice_system\", \"umutsuzluk-economy\", \"ofke-human_rights\",\n \"ofke-economy\", \"ofke-education\", \"ofke-health_and_public_health\",\n \"ofke-justice_system\", \"ofke-deprem\", \"arzu-social_policy\",\n \"arzu-human_rights\", \"arzu-education\", \"arzu-health_and_public_health\",\n \"arzu-justice_system\"] + [f\"{emo}-{lab}\" for emo in emotion_labels for lab in municipal_labels]\n# topic_stance_combinations = [\"kk_pro-social_policy\", \"kk_against-social_policy\", \"kk_neutral-social_policy\",\n# \"kk_pro-human_rights\", \"kk_against-human_rights\", \"kk_neutral-human_rights\",\n# \"kk_pro-economy\", \"kk_against-economy\", \"kk_neutral-economy\",\n# \"kk_pro-justice_system\", \"kk_against-justice_system\", \"kk_neutral-justice_system\",\n# \"erdogan_pro-social_policy\", \"erdogan_against-social_policy\", \"erdogan_neutral-social_policy\",\n# \"erdogan_pro-human_rights\", \"erdogan_against-human_rights\", \"erdogan_neutral-human_rights\",\n# \"erdogan_pro-economy\", \"erdogan_against-economy\", \"erdogan_neutral-economy\",\n# \"erdogan_pro-justice_system\", \"erdogan_against-justice_system\", \"erdogan_neutral-justice_system\"]\n# topic_stance_combinations = [\"hilmi_pro-social_policy\", \"hilmi_against-social_policy\", \"hilmi_neutral-social_policy\",\n# \"hilmi_pro-human_rights\", \"hilmi_against-human_rights\", \"hilmi_neutral-human_rights\",\n# \"hilmi_pro-economy\", \"hilmi_against-economy\", \"hilmi_neutral-economy\",\n# \"hilmi_pro-justice_system\", \"hilmi_against-justice_system\", \"hilmi_neutral-justice_system\"] + [f\"hilmi_{stance}-{lab}\" for lab in municipal_labels for stance in stance_labels]\n\n# pred_column_names = [\"total\"] + \\\n# [\"topic_\"+lab for lab in topic_labels] + \\\n# [\"emotion_\"+lab for lab in emotion_labels]\npred_column_names = [\"total\"] + \\\n [\"topic_\"+lab for lab in topic_labels] + \\\n [\"emotion_\"+lab for lab in emotion_labels] + \\\n municipal_egilim_combinations + topic_emotion_combinations# + \\\n # topic_stance_combinations\n # [\"hilmi_\"+lab for lab in stance_labels] + \\\n # [\"uskudar_\"+lab for lab in stance_labels] + \\\n\nunique_dates = [\"2019\", \"2020\", \"2021\", \"2022\", \"2023\"]\n# years = [\"2022\"]\n# months = [\"0\" + str(i) for i in range(1,10)] + [str(i) for i in range(10,13)]\n# unique_dates = []\n# for year in years:\n# for month in months:\n# unique_dates.append(\"{}-{}\".format(year, month))\n\n# # 0th week?\n# weeks = [\"0\" + str(i) for i in range(0,10)] + [str(i) for i in range(10,22)] # last data is from week 21\n# for week in weeks:\n# unique_dates.append(\"2023-week{}\".format(week))\n# # months = [\"0\" + str(i) for i in range(1,5)]\n# # for month in months:\n# # unique_dates.append(\"2023-{}\".format(month))\n\npred_columns = [\"{}_{}\".format(date, col_name) for date in unique_dates for col_name in pred_column_names]\nall_columns = [\"id_str\", \"gender\", \"age_group\", \"location\", \"total_tweet_num\"] + ideology_labels + 
pred_columns\nout_file.write(",".join(all_columns) + "\\n")\n\n# Connect to mongodb\nmongo_client = pymongo.MongoClient("mongodb://localhost:27017/")\ndb = mongo_client["politus_twitter"]\n# Get the collections(tables)\nuser_col = db["users"]\ntweet_col = db["tweets"]\n\n# 2968940 users have province_codes and are not organizations\n# query = {"$or": [{"demog_pred_full.isOrg": {"$lte": 0.5}}, {"demog_pred_txt.isOrg": {"$lte": 0.5}}], "province_codes": {"$nin": [None, []]}}\n\n# 2844242 users have province_codes and are not organizations\nquery = {"province_codes": {"$nin": [None, []]}, "demog_pred_full.isOrg": {"$lte": 0.5}}\n# query = {"uskudar": True}\ncolumns_to_return = ["_id", "tweets", "favs", "demog_pred_full", "demog_pred_txt", "province_codes"]\nresult = user_col.find(query, columns_to_return)\n\n# unique_dates = []\nwrite_idx = 0\nto_be_written = ""\nmissing_dates = []\nfor user_idx, user in enumerate(result):\n curr_write = []\n\n # get possible locations\n province_codes = {"location": [], "description": [], "screen_name": []}\n for c in user["province_codes"]:\n province_codes[c["source"]].append(str(c["pcode"]))\n if len(province_codes["location"]) > 0:\n out_pcode = Counter(province_codes["location"]).most_common()[0][0]\n elif len(province_codes["description"]) > 0:\n out_pcode = Counter(province_codes["description"]).most_common()[0][0]\n elif len(province_codes["screen_name"]) > 0:\n out_pcode = Counter(province_codes["screen_name"]).most_common()[0][0]\n else:\n raise ValueError("Empty province_codes!")\n\n if input_pcode != 0 and int(out_pcode) != input_pcode: continue\n\n # get age group and gender\n if user.get("demog_pred_full", "") != "":\n gender = "female" if user["demog_pred_full"]["isFemale"] >= 0.5 else "male"\n curr_age_preds = user["demog_pred_full"]["age"]\n age_group = age_groups[curr_age_preds.index(max(curr_age_preds))]\n elif user.get("demog_pred_txt", "") != "":\n gender = "female" if user["demog_pred_txt"]["isFemale"] >= 0.5 else "male"\n curr_age_preds = user["demog_pred_txt"]["age"]\n age_group = age_groups[curr_age_preds.index(max(curr_age_preds))]\n else:\n raise ValueError("No demog_pred for {}!".format(user["_id"]))\n\n # # To be used when we do not filter users without demography\n # gender = ""\n # age_group = ""\n # if user.get("demog_pred_full", {}) != {} and user["demog_pred_full"]["isOrg"] <= 0.5:\n # gender = "female" if user["demog_pred_full"]["isFemale"] >= 0.5 else "male"\n # curr_age_preds = user["demog_pred_full"]["age"]\n # age_group = age_groups[curr_age_preds.index(max(curr_age_preds))]\n\n curr_write.append(user["_id"])\n curr_write.append(gender)\n curr_write.append(age_group)\n curr_write.append(out_pcode)\n\n curr_user_tweets = user.get("tweets", []) + user.get("favs", [])\n # get curr user's tweets' predictions\n tweet_preds = {}\n results = tweet_col.find({"_id": {"$in": [tweet["id"] for tweet in curr_user_tweets]}},\n ["ideology_1", "ideology_2", "welfare", "democracy", "big5", "municipal", "egilim", "emotions"])\n for res in results:\n tweet_preds[res["_id"]] = (res.get("egilim", ""), res.get("emotions", []), res.get("ideology_1", []) + res.get("ideology_2", []), res.get("welfare", []) + res.get("democracy", []) + res.get("big5", []) + [f"municipal_{lab}" for lab in res.get("municipal", [])])\n\n\n # process tweets\n curr_total_tweet_num = 0\n ide_dict = {}\n 
for ide in ideology_labels:\n ide_dict[ide] = 0\n tweets_dict = {}\n for col in pred_columns:\n tweets_dict[col] = 0\n\n for tweet in curr_user_tweets:\n if tweet_preds.get(tweet[\"id\"], None) == None:\n continue\n\n # Get date\n # date = tweet[\"date\"].strftime(\"%Y-%m\")\n date = tweet[\"date\"].strftime(\"%Y\")\n if date == \"2018\":\n # We only want ideologies for tweets between 2018 and 2022\n _, _, ideologies, _ = tweet_preds[tweet[\"id\"]]\n for ide in ideologies:\n ide = \"ide_\" + ide\n ide_dict[ide] += 1\n continue\n\n if date not in unique_dates:\n # print(unique_dates)\n if date not in missing_dates:\n print(\"Tweet date {} is not in unique_dates!\".format(date))\n missing_dates.append(date)\n continue\n\n # To print last three weeks daily tweet count\n if date in unique_dates[-3:]:\n curr_day = tweet[\"date\"].strftime(\"%Y-%m-%d\")\n all_days[curr_day] += 1\n\n curr_total_tweet_num += 1\n tweets_dict[date+\"_total\"] += 1\n\n # ideology, topic and emotions\n egilim, emotions, ideologies, topics = tweet_preds[tweet[\"id\"]]\n\n for ide in ideologies:\n ide = \"ide_\" + ide\n ide_dict[ide] += 1\n\n curr_emotions = []\n # at_least_one_emo = False\n for emo in emotions:\n curr_emotions.append(emo)\n emo = date + \"_emotion_\" + emo\n tweets_dict[emo] += 1\n # at_least_one_emo = True\n\n # if at_least_one_emo:\n # tweets_dict[date+\"_total_emotions\"] += 1\n\n curr_topics = []\n for topic in topics:\n curr_topics.append(topic)\n topic = date + \"_topic_\" + topic\n tweets_dict[topic] += 1\n\n # for topic_emotions\n for emo in curr_emotions:\n for topic in curr_topics:\n curr_key = date + \"_\" + emo + \"-\" + topic\n if tweets_dict.get(curr_key, \"\") != \"\":\n tweets_dict[curr_key] += 1\n\n # egilim\n if egilim != \"\":\n egilim = date + \"_egilim_\" + egilim\n # tweets_dict[egilim] += 1\n\n # for municipal_egilim_combinations\n for topic in curr_topics:\n curr_key = egilim+\"-\"+topic\n if tweets_dict.get(curr_key, \"\") != \"\":\n tweets_dict[curr_key] += 1\n\n # # kk stance\n # if kk_stance != \"\":\n # kk_stance = date + \"_kk_\" + kk_stance\n # tweets_dict[kk_stance] += 1\n # # for topic_stance\n # for topic in curr_topics:\n # curr_key = kk_stance+\"-\"+topic\n # if tweets_dict.get(curr_key, \"\") != \"\":\n # tweets_dict[curr_key] += 1\n\n\n # Write\n curr_write.append(curr_total_tweet_num)\n for col in ideology_labels: # need to write ordered\n curr_write.append(ide_dict[col])\n\n for col in pred_columns: # need to write ordered\n curr_write.append(tweets_dict[col])\n\n assert(len(curr_write) == len(all_columns))\n\n to_be_written += \",\".join([str(elem) for elem in curr_write]) + \"\\n\"\n write_idx += 1\n\n if write_idx == write_batchsize:\n out_file.write(to_be_written)\n write_idx = 0\n to_be_written = \"\"\n\n\nif write_idx > 0:\n out_file.write(to_be_written)\n\nout_file.close()\nprint(all_days)\n","repo_name":"politusanalytics/twitter_database","sub_path":"export_users_to_csv.py","file_name":"export_users_to_csv.py","file_ext":"py","file_size_in_byte":13187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71064647048","text":"from flask import Flask, request, jsonify\n\nfrom utils import result_data\n\napp = Flask(__name__)\n\n\n@app.route(\"/perform_query\")\ndef perform_query():\n try:\n file_name = request.args.get(\"file_name\")\n cmd1 = request.args.get(\"cmd1\")\n value1 = request.args.get(\"value1\")\n cmd2 = request.args.get(\"cmd2\")\n value2 = request.args.get(\"value2\")\n except 
(KeyError, ValueError) as error:\n return jsonify(str(error)), 400\n commands_list = {cmd1: value1, cmd2: value2}\n result = result_data(file_name, commands_list)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"VaDmitrii/hw23_DV","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27906875169","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fondos', '0006_movcaja_caja'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='movcaja',\n name='fecha',\n field=models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Fecha y hora'),\n ),\n migrations.AlterField(\n model_name='movcaja',\n name='tipoMovCaja',\n field=models.ForeignKey(verbose_name=b'Operacion', to='fondos.TipoMovCaja'),\n ),\n ]\n","repo_name":"smatht/inges","sub_path":"fondos/migrations/0007_auto_20180211_1209.py","file_name":"0007_auto_20180211_1209.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"35781546102","text":"from django.urls import path\nfrom django.views.decorators.cache import cache_page\n\nfrom review.views import (\n autocomplete,\n course_plots,\n course_reviews,\n department_reviews,\n instructor_for_course_reviews,\n instructor_reviews,\n)\n\n\nHOUR_IN_SECONDS = 60 * 60\nDAY_IN_SECONDS = HOUR_IN_SECONDS * 24\nMONTH_IN_SECONDS = DAY_IN_SECONDS * 30\n\nurlpatterns = [\n path(\n "course/<slug:course_code>/",\n cache_page(MONTH_IN_SECONDS)(course_reviews),\n name="course-reviews",\n ),\n path(\n "course_plots/<slug:course_code>/",\n cache_page(DAY_IN_SECONDS)(course_plots),\n name="course-plots",\n ),\n path(\n "instructor/<slug:instructor_id>/",\n cache_page(MONTH_IN_SECONDS)(instructor_reviews),\n name="instructor-reviews",\n ),\n path(\n "department/<slug:department_code>/",\n cache_page(MONTH_IN_SECONDS)(department_reviews),\n name="department-reviews",\n ),\n path(\n "course/<slug:course_code>/<slug:instructor_id>/",\n cache_page(MONTH_IN_SECONDS)(instructor_for_course_reviews),\n name="course-history",\n ),\n path("autocomplete", cache_page(MONTH_IN_SECONDS)(autocomplete), name="review-autocomplete"),\n]\n","repo_name":"pennlabs/penn-courses","sub_path":"backend/review/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"16"}
+{"seq_id":"5901967600","text":"from collections import Counter, defaultdict\nfrom itertools import combinations, chain \nimport re\nimport sys\n\ndef main():\n with open("3.in") as f:\n #with open("test3.in") as f:\n inp = f.read().strip()\n inp = inp.splitlines() \n\n p1 = part1(inp)\n p2 = part2(inp)\n\n print(f"part1: {p1}")\n print(f"part2: {p2}")\n\n for i, part in enumerate([p1, p2]):\n print(f'bash submit.sh {i+1} {part}')\n\n\ndef part1(inp):\n gamma = epsilon = ""\n\n for i in range(len(inp[0])):\n n_one = n_zero = 0\n for val in inp:\n curr = val[i]\n if curr == "0":\n n_zero += 1\n else:\n n_one += 1\n if n_one > n_zero:\n gamma += "0"\n epsilon += "1"\n else:\n gamma += "1"\n epsilon += "0"\n\n g_num = int(gamma, 2)\n e_num = int(epsilon, 2)\n\n return g_num * e_num\n \n\ndef findMostCommon(inp, i):\n n_one = n_zero = 0\n for val in inp:\n curr = val[i]\n if curr == "0":\n n_zero += 
1\n else:\n n_one += 1\n\n return \"0\" if n_zero > n_one else \"1\"\n\ndef part2(inp):\n return getOx(inp) * getCom(inp)\n\n\ndef getOx(inp):\n vals = inp\n\n for i in range(len(inp[0])):\n if len(vals) == 1:\n break\n mostCom = findMostCommon(vals, i)\n vals = [v for v in vals if v[i] == mostCom]\n\n return int(vals[0], 2)\n\ndef getCom(inp):\n vals = inp\n\n for i in range(len(inp[0])):\n if len(vals) == 1:\n break\n mostCom = findMostCommon(vals, i)\n vals = [v for v in vals if v[i] != mostCom]\n\n return int(vals[0], 2)\n\nif __name__ == \"__main__\":\n main()","repo_name":"lkmsf/adventOfCode-","sub_path":"2021/day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"55664541316","text":"from urllib.request import urlopen\nimport cgi\nimport math\nfrom datetime import datetime\n\n\ndef get_filename(url):\n r = urlopen(url)\n msg = r.info()['Content-Disposition']\n value, params = cgi.parse_header(msg)\n filename = params[\"filename\"]\n return filename\n\n\ndef jd_to_datetime(jd):\n\n jd = jd + 0.5\n F, I = math.modf(jd)\n I = int(I)\n A = math.trunc((I - 1867216.25)/36524.25)\n \n if I > 2299160:\n B = I + 1 + A - math.trunc(A / 4.)\n else:\n B = I\n \n C = B + 1524\n D = math.trunc((C - 122.1) / 365.25)\n E = math.trunc(365.25 * D)\n G = math.trunc((C - E) / 30.6001)\n day = C - E + F - math.trunc(30.6001 * G)\n \n if G < 13.5:\n month = G - 1\n else:\n month = G - 13\n \n if month > 2.5:\n year = D - 4716\n else:\n year = D - 4715\n\n d_ = day\n d = int(d_)\n h_ = (d_-d)*24\n h = int(h_)\n m_ = (h_-h)*60\n m = int(m_)\n s_ = (m_-m)*60\n s = int(s_)\n ms_ = (s_-s)*1000000\n ms = int(ms_)\n dt = datetime(year, month, d, h, m, s, ms)\n \n return dt\n\n\n","repo_name":"behrouzz/gaiadr3","sub_path":"gaiadr3/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1519484349","text":"def usun_duplikaty(tablica):\r\n tablica = tablica[:]\r\n for i in range(len(tablica)):\r\n tablica[i] = [tablica[i]] + [i]\r\n tablica.sort()\r\n \r\n temp = [[tablica[0][1], tablica[0][0]]]\r\n for i in range(1, len(tablica)):\r\n if tablica[i][0] != tablica[i-1][0]:\r\n temp.append([tablica[i][1], tablica[i][0]])\r\n temp.sort()\r\n \r\n result = []\r\n for element in temp:\r\n result.append(element[1])\r\n \r\n return result\r\n\r\n#niech n = len(tablica)\r\n#złożoność czasowa: O(n)\r\n#złożoność pamięciowa: O(n)\r\n#//**********************************\r\ntablica = [5,3,1,4,6,4,9]\r\nprint(usun_duplikaty(tablica))","repo_name":"KalBia/UWr","sub_path":"Python/lista5/zadanie_5_4.py","file_name":"zadanie_5_4.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29156452795","text":"\"\"\"\n在二维平面上,有一个机器人从原点 (0, 0) 开始。给出它的移动顺序,判断这个机器人在完成移动后是否在 (0, 0) 处结束。\n\n移动顺序由字符串表示。字符 move[i] 表示其第 i 次移动。机器人的有效动作有 R(右),L(左),U(上)和 D(下)。如果机器人在完成所有动作后返回原点,则返回 true。否则,返回 false。\n\n注意:机器人“面朝”的方向无关紧要。 “R” 将始终使机器人向右移动一次,“L” 将始终向左移动等。此外,假设每次移动机器人的移动幅度相同。\n\n示例 1:\n输入: \"UD\"\n输出: true\n解释:机器人向上移动一次,然后向下移动一次。所有动作都具有相同的幅度,因此它最终回到它开始的原点。因此,我们返回 true。\n\"\"\"\n\n\nclass Solution(object):\n def judgeCircle(self, moves):\n \"\"\"\n :type moves: str\n :rtype: bool\n \"\"\"\n moves_dict = {'U':0, 'D':0, 'L': 0, 'R': 0}\n for ch in moves:\n moves_dict[ch] += 1\n if moves_dict['U'] == 
moves_dict['D'] and moves_dict['L'] == moves_dict['R']:\n return True\n return False\n","repo_name":"XinZhaoFu/leetcode_moyu","sub_path":"657机器人能否返回原点.py","file_name":"657机器人能否返回原点.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30047267722","text":"from tkinter import *\nfrom tkinter import ttk\nimport tkinter as tk\nfrom ControladorBD import *\n\n\n#Creamos un objeto de tipo controlador\ncontrolador = controladorBD()\n#Procedemos a guardar usuarios usando el metodo GuardarUsuarios() del objeto controlador\ndef ejecutaInsert():\n controlador.GuardarUsuario(varNom.get(),varCor.get(),varCon.get())\n#Funcion para buscar un usuario\ndef ejecutaSelectU():\n rsUsuario= controlador.consultaUsuario(varBus.get())\n for usu in rsUsuario:\n cadena=str(usu[0])+\" \"+usu[1]+\" \"+usu[2]+\" \"+str(usu[3])\n if(rsUsuario):\n textBus.insert(\"0.0\",cadena)\n else:\n messagebox.showinfo(\"No encontrado\", \"Usuario no registrado en la BD\")\n#Función para mostrar a todos los usuarios \ndef ejecutaConsultaUsu():\n consulta=controlador.consultarUsu()\n tabUsu.delete(*tabUsu.get_children())\n for user in consulta:\n tabUsu.insert(\"\",tk.END,text=\"\",values=user)\n#Funcion para actualizar los datos del usuario\ndef ejecutaActualizar():\n rsUsuario=controlador.consultaUsuario(variID.get())\n if(rsUsuario):\n controlador.actualizarUsu(variID.get(),variNom.get(),variCor.get(),variCon.get())\n else:\n messagebox.showerror(\"ERROR\",\"No hay usuario registrado en la BD\")\ndef ejecutaBuscar():\n rsUsuario=controlador.consultaUsuario(Varid.get())\n textBus1.delete(\"1.0\",\"end\")\n for usu in rsUsuario:\n cadena1=str(usu[0])+\" \"+usu[1]+\" \"+usu[2]+\" \"+str(usu[3])\n if(rsUsuario):\n textBus1.insert(\"0.0\",cadena1)\n else:\n messagebox.showerror(\"ERROR\",\"No hay usuario registrado en la BD\")\ndef ejecutaEliminarU():\n conf=messagebox.askyesno(\"ELIINAR USUARIO\",\"¿Seguro que desea eliminar el usuario?\")\n if (conf==True):\n try:\n controlador.EliminarUsu(Varid.get())\n except sqlite3.OperationalError:\n messagebox.showerror(\"ERROR\",\"Error en la consulta\")\n\nventana=Tk()\nventana.title(\"CRUD Usuarios\")\nventana.geometry(\"500x300\")\n\npanel=ttk.Notebook(ventana)\npanel.pack(fill=\"both\",expand=\"yes\")\n\npestana1=ttk.Frame(panel)\npestana2=ttk.Frame(panel)\npestana3=ttk.Frame(panel)\npestana4=ttk.Frame(panel)\npestana5=ttk.Frame(panel)\n\n#Comienza la pestaña 1: Formulario de Usuarios\ntitulo=Label(pestana1,text=\"Registro de Usuarios\",fg=\"Blue\",font=(\"Modern\",18)).pack()\n#Nombre\nvarNom=tk.StringVar()\nlblNom=Label(pestana1,text=\"Nombre: \").pack()\ntxtNom=Entry(pestana1,textvariable=varNom).pack()\n#Correo\nvarCor=tk.StringVar()\nlblCor=Label(pestana1,text=\"Correo: \").pack()\ntxtCor=Entry(pestana1,textvariable=varCor).pack()\n#Contraseña\nvarCon=tk.StringVar()\nlblCon=Label(pestana1,text=\"Contraseña: \").pack()\ntxtCon=Entry(pestana1,textvariable=varCon).pack()\n#Boton\nbtnGuardar=Button(pestana1,text=\"Guardar Usuario\",command=ejecutaInsert).pack()\n\n#pestaña 2:Buscar Usuario\ntitulo2= Label(pestana2,text=\"Buscar Usuario\",fg=\"green\",font=(\"Modern\",18)).pack()\nvarBus=tk.StringVar()\nlblid=Label(pestana2,text=\"Identificador de Usuario: \").pack()\ntxtid=Entry(pestana2,textvariable=varBus).pack()\nbtnBusqueda=Button(pestana2,text=\"Buscar\",command=ejecutaSelectU).pack()\nsubBus=Label(pestana2,text=\"Registrado: 
\",fg=\"blue\",font=(\"Modern\",15)).pack()\ntextBus=tk.Text(pestana2,height=5,width=52)\ntextBus.pack()\n\n#Pestaña 3: Consultar usuario\ntitulo3=Label(pestana3,text=\"Consultar Usuarios\",fg=\"red\",font=(\"Modern\",18)).pack()\nbtnConsulta=Button(pestana3,text=\"Consultar\",command=ejecutaConsultaUsu).pack()\n#Tabla\ncolumns=(\"id\",\"nombre\",\"correo\",\"contra\")\ntabUsu=ttk.Treeview(pestana3,columns=columns,show=\"headings\")\ntabUsu.column(\"id\",anchor=tk.W,width=30)\ntabUsu.column(\"nombre\",anchor=tk.W,width=150)\ntabUsu.column(\"correo\",anchor=tk.W,width=150)\ntabUsu.column(\"contra\",anchor=tk.W,width=150)\ntabUsu.heading(\"id\",text=\"ID\")\ntabUsu.heading(\"nombre\",text=\"NOMBRE\")\ntabUsu.heading(\"correo\",text=\"CORREO\")\ntabUsu.heading(\"contra\",text=\"CONTRASEÑA\")\ntabUsu.pack()\n#Pestaña 4: Actualizar Usuario\ntitulo4 = Label(pestana4, text = \"Actualizar Usuario\", fg = \"#84A7E5\", font = (\"Modern\", 18)).pack()\nvariID = tk.StringVar()\nvariNom = tk.StringVar()\nvariCor = tk.StringVar()\nvariCon = tk.StringVar()\nlablid = Label(pestana4, text = \"ID de usuario: \").pack()\ntextid = Entry(pestana4, textvariable = variID).pack()\nlablNom = Label(pestana4, text = \"Escribe el nuevo nombre de Usuario: \").pack()\ntextNom = Entry(pestana4, textvariable = variNom).pack()\nlablCor = Label(pestana4, text = \"Escribe el nuevo correo electronico: \").pack()\ntextCor = Entry(pestana4, textvariable = variCor).pack()\nlablCon = Label(pestana4, text = \"Escribe la nueva contraseña: \").pack()\ntextCon = Entry(pestana4, textvariable = variCon).pack()\nbtnActualizar=Button(pestana4,text=\"Actualizar\",command=ejecutaActualizar).pack()\n#Pestaña 5: Eliminar Usuario\ntitulo5 = Label(pestana5, text = \"Eliminar Usuario\", fg = \"#D73F3F\", font = (\"Modern\", 18)).pack()\nVarid=tk.StringVar()\nlablid=Label(pestana5,text=\"ID del usuario: \").pack()\ntextid=Entry(pestana5,textvariable=Varid).pack()\nbtnBuscarUsu=Button(pestana5,text=\"Buscar\",command=ejecutaBuscar).pack()\nsubBus1=Label(pestana5,text=\"Registrado: \",fg=\"#5DD73F\",font=(\"Modern\",15)).pack()\ntextBus1=tk.Text(pestana5,height=5,width=52)\ntextBus1.pack()\nbtnEliminar=Button(pestana5,text=\"Eliminar\",command=ejecutaEliminarU).pack()\n\npanel.add(pestana1,text=\"Formulario de Usuarios\")\npanel.add(pestana2,text=\"Buscar Usuario\")\npanel.add(pestana3,text=\"Consultar Usuarios\")\npanel.add(pestana4,text=\"Actualizar Usuarios\")\npanel.add(pestana5,text=\"Eliminar Usuario\")\n\n\nventana.mainloop()","repo_name":"JessicaBarradas/Practica_9","sub_path":"TkintherSqlite/UsuariosCRUD.py","file_name":"UsuariosCRUD.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33879078762","text":"from aiogram import types\nfrom aiogram.dispatcher import FSMContext\n\nfrom data.config import ADMINS\nfrom keyboards.default.keyboards import main_menu, back, admin_main_menu\nfrom loader import dp, db, bot\nfrom states.main_states import Complete, SetPrice\n\n\n@dp.message_handler(text=\"↔️ Mijoz rejimiga o'tish\")\nasync def switch_mode(message: types.Message):\n await message.answer(\"Mijoz rejimiga o'tildi.\\nAgar admin sahifasiga qaytmochi bo'lsangiz /start ni bosing\", reply_markup=main_menu)\n\n@dp.message_handler(commands=[\"set_completed\"])\n@dp.message_handler(text=\"✔️ Buyurtma tugatildi\")\nasync def get_id(message: types.Message):\n await message.answer(\"Buyurtma raqamini kiriting: \", reply_markup=back)\n await 
Complete.id.set()\n\n@dp.message_handler(state=Complete.id)\nasync def completed(message: types.Message, state=FSMContext):\n try:\n if message.text.isdigit():\n await message.answer(f\"Siz kiritdingiz: {message.text}\", reply_markup=main_menu)\n order_record = await db.get_order(id=int(message.text))\n order_id = order_record['id'] # Extract the id from the record\n await db.completed(id=order_id) # Pass the id to the completed method\n service_id = int(order_record['service_id'])\n service = await db.get_service(service_id)\n user = await db.select_user(id=int(order_record['user_id']))\n await message.answer(f\"Id: {order_record['id']} \\n\"\n f\"Manzil: {order_record['address']} \\n\"\n f\"Buyurtma holati: Topshirildi\\n\"\n f\"Xizmat turi: {service[0]['name']}\\n\"\n f\"Buyurtmachi telefon raqami: {order_record['phone_number']}\\n\"\n \"\\nBuyurtma yakunlandi!\")\n await bot.send_message(int(user['telegram_id']), f\"{order_record['id']} raqamli buyurtmangiz yakunlandi! Tez orada yetkazib beriladi\")\n await state.finish()\n elif message.text == \"🔙 Ortga\":\n await state.finish()\n if message.from_user.id == int(ADMINS[0]):\n await message.answer(\"Ortga qaytildi.\", reply_markup=admin_main_menu)\n else:\n await message.answer(\"Ortga qaytildi.\", reply_markup=main_menu)\n except:\n await message.answer(\"Bu raqamga tegishli buyurtma yo'q\\nTekshirib qaytadan kiriting: \")\n\n\ndef send_completed(chat_id):\n # Your existing code...\n\n # Assuming order_record is your Order model instance\n if order_record.is_completed:\n bot.message.answer(chat_id=chat_id, message=f\"Completed\\n\"\n f\"Id: {order_record.id}\\n\"\n f\"Manzil: {order_record.address}\\n\"\n f\"Xizmat turi: {order_record.service.name}\\n\"\n f\"Buyurtmachi telefon raqami: {order_record.phone_number}\\n\"\n f\"\\nBuyurtma yakunlandi!\")\n\n\n\n@dp.message_handler(commands=[\"set_price\"])\n@dp.message_handler(text=\"💲 Narx belgilash\")\nasync def get_id(message: types.Message, state: FSMContext):\n await message.answer(\"Buyurtma raqamini kiriting: \", reply_markup=back)\n await SetPrice.id.set()\n\n\n@dp.message_handler(state=SetPrice.id)\nasync def get_price(message: types.Message, state=FSMContext):\n global order_record\n if message.text.isdigit():\n await message.answer(f\"Buyurtma umumiy narxini kiriting:\")\n order_record = await db.get_order(id=int(message.text))\n await SetPrice.price.set()\n elif message.text == \"🔙 Ortga\":\n await state.finish()\n if message.from_user.id == int(ADMINS[0]):\n await message.answer(\"Ortga qaytildi.\", reply_markup=admin_main_menu)\n else:\n await message.answer(\"Ortga qaytildi.\", reply_markup=main_menu)\n\n else:\n await message.answer(\"Nakladnoy raqamini kiriting! 
\\n\\nSiz raqam kiritmadingiz!\", reply_markup=back)\n\n\n\n\n@dp.message_handler(state=SetPrice.price)\nasync def get_price(message: types.Message, state=FSMContext):\n if message.text.isdigit():\n order = order_record['id']\n update = await db.update_price(id=int(order), price=int(message.text))\n await state.finish()\n await message.answer(\"Buyurtmaga narx belgilandi\", reply_markup=admin_main_menu)\n elif message.text == \"🔙 Ortga\":\n await state.finish()\n if message.from_user.id == int(ADMINS[0]):\n await message.answer(\"Ortga qaytildi.\", reply_markup=admin_main_menu)\n else:\n await message.answer(\"Ortga qaytildi.\", reply_markup=main_menu)\n else:\n await message.answer(\"Son kiriting:\")\n\n#\n# @dp.message_handler(text=\"🗓 Buyurtmalar\")\n# async def history(message: types.Message):\n# await message.answer()","repo_name":"salohiddinusmonaliyev/gilam-yuvish-bot","sub_path":"handlers/users/admin_handlers.py","file_name":"admin_handlers.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43104928722","text":"import socket\r\nimport threading\r\n\r\nnickname = input(\"Choose a nickname: \")\r\n\r\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nclient.connect(('127.0.0.1', 55555))\r\n\r\n\r\ndef receive():\r\n while True:\r\n try:\r\n message = client.recv(1024).decode('ascii')\r\n if message == 'NICK':\r\n client.send(nickname.encode('ascii'))\r\n else:\r\n print(message)\r\n\r\n except:\r\n print(\"Une ERREUR c'est produite\")\r\n # fermer la connection\r\n client.close()\r\n break\r\n\r\n\r\ndef write():\r\n while True:\r\n message = f'{nickname}: {input(\"\")}' # le client tape son message\r\n client.send(message.encode('ascii')) # le client envoi le message avec la touche entrer\r\n\r\n\r\nreceive_thread = threading.Thread(target=receive)\r\nreceive_thread.start()\r\n\r\nwrite_trhead = threading.Thread(target=write)\r\nwrite_trhead.start()\r\n\r\n\r\n# on peut ajouter des kik, ban, emoji\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"iudhael/divers-program-python","sub_path":"reseaux/TCP chat room/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44193552892","text":"import numpy as np\nimport pickle\nimport sys\n\n# =================================\n# Code to form the single frame dictionary from the mstcn single frame files\n# Example command: python utility/convert_timestamp_annot.py data/gtea_annotation_all.npy\n# /mnt/ssd/all_users/dipika/ms_tcn/data/gtea/groundTruth/ data/gtea_single_frame.pkl 1\n# =================================\n\nannota_selected_frame = np.load(sys.argv[1], allow_pickle=True).item()\n\ngroundtruth_dir = sys.argv[2]\ndump_file_name = sys.argv[3]\nsample_rate = int(sys.argv[4])\n\nnew_selected_frame_dict = {}\n\nfor filename in annota_selected_frame.keys():\n selected_frame_indices = annota_selected_frame[filename]\n \n gd_labels = np.array(open(groundtruth_dir + filename, \"r\").read().split(\"\\n\")[0:-1])\n if sample_rate > 1:\n gd_labels = gd_labels[::sample_rate]\n\n selected_frames_labels = gd_labels[selected_frame_indices].tolist()\n\n new_selected_frame_dict[filename] = [(ele1, ele2) for ele1, ele2 in zip(selected_frame_indices, selected_frames_labels)]\n\npickle.dump(new_selected_frame_dict, open(dump_file_name, 
\"wb\"))\n","repo_name":"rahulrahaman/Timestamp-and-SkipTag","sub_path":"utility/convert_timestamp_annot.py","file_name":"convert_timestamp_annot.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"12534641521","text":"# Torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GCNConv(nn.Module):\n def __init__(self, _in_feature, _out_feature):\n super(GCNConv, self).__init__()\n self.linear = nn.Linear(_in_feature, _out_feature, bias=False)\n\n def forward(self, _x: torch.Tensor, _adjacency_hat: torch.sparse_coo_tensor):\n x = self.linear(_x)\n x = torch.sparse.mm(_adjacency_hat, x)\n return x\n\n\nclass GCN(nn.Module):\n def __init__(self, _input_size, _hidden_size, _output_size, _num_hidden_layers=0, _dropout=0.1, _residual=False):\n super(GCN, self).__init__()\n\n self.dropout = _dropout\n self.residual = _residual\n\n self.input_conv = GCNConv(_input_size, _hidden_size)\n self.output_conv = GCNConv(_hidden_size, _output_size)\n\n self.hidden_convs = nn.ModuleList([GCNConv(_hidden_size, _hidden_size) for _ in range(_num_hidden_layers)])\n\n def forward(self, _x: torch.Tensor, _adjacency_hat: torch.sparse_coo_tensor, _labels: torch.Tensor = None):\n x = F.dropout(_x, p=self.dropout, training=self.training)\n x = F.relu(self.input_conv(x, _adjacency_hat))\n for conv in self.hidden_convs:\n if self.residual:\n x = F.relu(conv(x, _adjacency_hat)) + x\n else:\n x = F.relu(conv(x, _adjacency_hat))\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.output_conv(x, _adjacency_hat)\n\n if _labels is None:\n return x\n\n loss = nn.CrossEntropyLoss()(x, _labels)\n return x, loss\n","repo_name":"Magista08/GCN","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3595913995","text":"#Creating a stack with list.\n\nclass Stack:\n def __init__(self):\n self.list = []\n \n def __str__(self):\n values = reversed(self.list)\n values = [str(x) for x in values]\n return '\\n'.join(values)\n \n def isEmpty(self):\n if self.list == []:\n return True\n else:\n return False\n\n def push(self,value):\n self.list.append(value)\n return \"Element has been added to the stack\"\n \n def pop(self):\n if self.isEmpty():\n return \"Stack does not exist\"\n else:\n return self.list.pop()\n \n def peek(self):\n if self.isEmpty():\n return \"Stack does not exist\"\n else:\n return self.list[-1]\n\n def deleteStack(self):\n if self.isEmpty():\n return \"Stack does not exist\"\n else:\n if (input('Do you want to delete the stack?').upper()) == 'Y':\n self.list = None\n\n\n\ncustomStack = Stack()\ncustomStack.push(1)\ncustomStack.push(2)\ncustomStack.push(3)\ncustomStack.push(5)\nprint(customStack.peek())","repo_name":"neel2306/DSA_python","sub_path":"Stacks/stack_with_list.py","file_name":"stack_with_list.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"55665485506","text":"\"\"\"ColumnLayout\n\"\"\"\n\nmyLayout = cmds.columnLayout()\ncmds.button(label = \"text\", parent = myLayout)\ncmds.iconTextButton(label = \"text\", style=\"textOnly\", w=30, h=30, parent = myLayout)\n\n\"\"\"rowLayout\n\"\"\"\n\nfrm = cmds.rowLayout( numberOfColumns = 4,\n cl4 = [\"right\", \"left\", \"center\", \"center\"],\n co4 = [0,0,100,0],\n 
cw4=[100,50,50,50])\n\nb1 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b1\", parent = frm)\nb2 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b2\", parent = frm)\nb3 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b3\", parent = frm)\nb4 = cmds.button(label=\"test\", parent = frm)\n\n\"\"\"formLayout\n\"\"\"\n\nfrm = cmds.formLayout(w = 300, h = 200)\n\nb1 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b1\", parent = frm)\nb2 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b2\", parent = frm)\nb3 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b3\", parent = frm)\nb4 = cmds.button(label=\"test\", parent = frm)\n\ncmds.formLayout(frm, edit=True, attachForm = [\n (b2, \"left\", 100)\n ],\n attachControl = [\n (b1, \"left\", 50, b2)\n ],\n attachPosition = [\n (b3, \"left\", -15, 50),\n (b3, \"top\", -15, 50)\n ])\n\n\"\"\"frameLayout\n\"\"\"\n\nclm = cmds.columnLayout()\nfrm = cmds.frameLayout( \"testFrameLayoutA\",\n labelVisible = True,\n collapse = 1,\n label = \"Test Shit\",\n w = 300,\n collapsable=True,\n borderVisible=False,\n parent = clm)\nb1 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b1\", parent = frm)\nb2 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b2\", parent = frm)\nb3 = cmds.iconTextButton(style='textOnly', w=30, h=30, label=\"b3\", parent = frm)\nb4 = cmds.button(label=\"test\", parent = frm)","repo_name":"idushie/Animation-school","sub_path":"W_5/examples/LayoutsExamples.py","file_name":"LayoutsExamples.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43139955114","text":"import copy\n\nimport torch\nfrom rdkit.Chem import AllChem\nfrom rdkit import Chem, DataStructs\nfrom MARS.common.chem import break_bond_mol, add_arm, check_validity, mol_to_dgl, draw_mol, find_common_scaffold\nfrom MARS.datasets.utils import load_vocab\nfrom MARS.common.utils import sample_idx\nimport numpy as np\nimport math\nimport random\n\nfrom MARS.estimator.scorer.scorer import get_scores, get_score\nfrom rdkit import Chem\n\nfrom MARS.datasets.datasets import ImitationDataset\nfrom torch.utils import data\n\nclass Environement:\n\n def __init__(self):\n self.vocab = load_vocab('MARS/data', 'chembl', 1000)\n with open('MARS/data/actives_gsk3b,jnk3.txt','r') as f:\n smiles = f.readlines()\n smiles = [line.split()[0] for line in smiles]\n smiles = [line.split(',')[0] for line in smiles]\n ref_smiles = smiles[1:]\n ref_mols = [Chem.MolFromSmiles(smi) for smi in ref_smiles if Chem.MolFromSmiles(smi)]\n\n ref_fps = [AllChem.GetMorganFingerprintAsBitVect(x, 3, 2048) for x in ref_mols]\n self.ref_mol_dict = {\n 'fps' : ref_fps,\n 'mols' : ref_mols,\n 'smiles' : ref_smiles,\n }\n\n new_smiles = []\n new_fps = []\n new_mols = []\n self.new_mol_dict = {\n 'fps': new_fps,\n 'mols': new_mols,\n 'smiles': new_smiles\n\n }\n\n self.nb_episode = 0\n self.threshold_nov = 0.4\n ##TODO change value of threshold\n self.threshold_div = 0.3\n self.thresholf_mydiv = 0.5\n self.max_step = 10\n self.step = 0\n self.new = True\n self.current_mol = None\n self.current_graph = None\n self.indice_init_mol = None ####### indice in the set (subtract len(ref_mols) if init mol from the new_mol_set)\n self.init_mol_from_ref = True\n\n self.size_ref_dataset = len(self.ref_mol_dict['mols'])\n self.nb_call_reward = 0\n\n\n with open('scaffold.txt','r') as f:\n smiles = f.readlines()\n smiles = [line.split()[0] for line 
in smiles]\n scaffold = [Chem.MolFromSmiles(smi) for smi in smiles if Chem.MolFromSmiles(smi)]\n self.scaffold_mol_dict = {\n 'mols' : scaffold,\n 'smiles' : smiles,\n 'UCB' : list(np.zeros(len(scaffold))),\n 'R' : list(np.zeros(len(scaffold))),\n 'N' : list(np.zeros(len(scaffold)))\n }\n\n self.previous_score = None\n\n self.imitation_dataset = ImitationDataset(graphs=[], edits= {\n 'act' :[],\n 'del' : [],\n 'add' : [],\n 'arm' : [],\n 'global_act' : []\n })\n\n self.MaxImittationSetSize = 50000\n self.index_scaffold = None\n\n\n def reset(self):\n self.new = True\n self.step = 0\n self.current_mol = None\n self.current_graph = None\n self.index_scaffold = None\n if self.new :\n self.current_mol, self.current_graph = self.init_mol()\n self.new = False\n\n self.nb_episode +=1\n\n\n\n def init_mol(self):\n \"\"\"\n return init molecule\n \"\"\"\n R = np.array(self.scaffold_mol_dict['R'])\n N = np.array(self.scaffold_mol_dict['N'])\n\n ##TODO change the way to init the molecule\n prob = 1 + (1+R)/(N+1e-6)\n idx_init_mol = sample_idx(prob)\n init_mol = self.scaffold_mol_dict['mols'][idx_init_mol]\n init_graph = mol_to_dgl(init_mol)\n self.index_scaffold = idx_init_mol\n self.previous_score,_,_ = self.compute_reward(init_mol, True)\n return init_mol, init_graph\n\n\n def edit(self, graph, mol, action, del_idx, add_idx, arm_idx):\n \"\"\"\n\n @param graph: dgl graph\n @param mol: molecule\n @param action: action 0 or 1\n @param del_idx: idx in graph\n @param arms_idx: arm idx\n @param add_idx: add idx\n\n\n @return new_mol, not_changed\n \"\"\"\n\n if action == 0: ####del\n\n u = graph.all_edges()[0][del_idx].item()\n v = graph.all_edges()[1][del_idx].item()\n try:\n new_mol = break_bond_mol(mol, u, v)\n if new_mol.GetNumBonds() <= 0:\n raise ValueError\n\n except ValueError:\n new_mol = None\n\n if check_validity(new_mol):\n return new_mol, False\n\n elif action ==1 : ###add arm\n new_arm = self.vocab.arms[arm_idx]\n try :\n new_mol = add_arm(mol, u=add_idx, arm = new_arm.mol, v = new_arm.v)\n except :\n new_mol = None\n\n if check_validity(new_mol) and new_mol.GetNumAtoms() <= 50: # limit size\n return new_mol, False\n else :\n NotImplementedError\n\n\n return mol, True\n\n def next_step(self, action):\n \"\"\"\n\n :param action: dict 'act', 'del', 'add', 'arm'\n :return: state (new_mol), reward, done(if no add to new_mols)\n \"\"\"\n\n self.step += 1\n act = action['act']\n del_idx = action['del']\n add_idx = action['add']\n arm_idx = action['arm']\n global_action = action['global_act']\n\n ###make a copy of graph and molecule\n mol = copy.deepcopy(self.current_mol)\n graph = copy.deepcopy(self.current_graph)\n\n ###edition of molecule\n new_mol, not_changed = self.edit(graph, mol, action=act, del_idx=del_idx, add_idx=add_idx, arm_idx=arm_idx)\n if not_changed == False:\n self.current_mol = new_mol\n self.current_graph = mol_to_dgl(new_mol)\n\n\n ###compute reward\n reward, CP, CND = self.compute_reward(new_mol, not_changed)\n\n\n\n ##### save molecule according its chemical property\n if CP and CND:\n self.new_mol_dict['mols'].append(new_mol)\n self.new_mol_dict['fps'].append(AllChem.GetMorganFingerprintAsBitVect(new_mol, 3, 2048))\n self.update_scaffold(new_mol)\n self.scaffold_mol_dict['R'][self.index_scaffold] += 1\n\n\n\n\n # ###improvement imitation learning\n if self.previous_score < reward:\n self.update_imitation_dataset(action, graph)\n\n self.previous_score = reward\n if self.step >= self.max_step:\n return self.current_graph, reward, True\n else:\n return self.current_graph, 
reward, False\n\n\n def update_imitation_dataset(self, action, graph):\n edits = copy.deepcopy(action)\n for k in edits:\n edits[k] = [edits[k]]\n dataset = ImitationDataset(graphs=[graph], edits=edits)\n self.imitation_dataset.merge_(dataset)\n n_sample = len(dataset)\n if n_sample > self.MaxImittationSetSize:\n indices = [i for i in range(n_sample)]\n random.shuffle(indices)\n indices = indices[:50000]\n self.imitation_dataset = data.Subset(self.imitation_dataset, indices)\n self.imitation_dataset = ImitationDataset.reconstruct(self.imitation_dataset)\n\n def update_scaffold(self, mol):\n for m in self.ref_mol_dict['mols'] + self.new_mol_dict['mols']:\n scaffold = find_common_scaffold(mol, m)\n if scaffold is not None:\n self.scaffold_mol_dict['R'].append(0)\n self.scaffold_mol_dict['N'].append(0)\n self.scaffold_mol_dict['mols'].append(scaffold)\n\n def compute_reward(self, new_mol, not_change):\n \"\"\"\n\n :param new_mol: new_state mol\n :param not_change: if mol change or not (if don't change => invalid action)\n :return: reward\n \"\"\"\n gsk3b_score, jnk3_score, qed_score, sa_score = self.compute_prop_score(new_mol)\n sim_with_ref = self.compute_sim_with_data(new_mol, self.ref_mol_dict['fps'])\n if len(self.new_mol_dict['fps']):\n sim_with_proposal = self.compute_sim_with_data(new_mol, self.new_mol_dict['fps'])\n else :\n sim_with_proposal = [0]\n C_div = int(np.array(sim_with_proposal).mean()=0.5) * int(jnk3_score>=0.5) * int(qed_score>=0.6) * int(sa_score>=0.67)\n\n prop_reward = gsk3b_score* jnk3_score * qed_score* sa_score /4\n nov_reward = 1 - max(sim_with_ref) if max(sim_with_ref) >= self.threshold_nov + 0.1 else 1 - (self.threshold_nov) + 0.1\n # div_reward = 1 - max(sim_with_proposal) if max(sim_with_proposal) >= self.threshold_nov + 0.1 else 1 - (self.threshold_nov) + 0.1\n div_reward = 1 - max(sim_with_proposal)\n reward = prop_reward * nov_reward * div_reward\n return reward, CP, CND\n\n def compute_prop_score(self, mol):\n gsk3b_score = get_scores('gsk3b', [mol])[0]\n jnk3_score = get_scores('jnk3', [mol])[0]\n sa_score = get_score('sa', mol)\n qed_score = get_score('qed', mol)\n\n return gsk3b_score, jnk3_score, qed_score, sa_score\n\n def compute_sim_with_data(self, mol, data):\n fps = AllChem.GetMorganFingerprintAsBitVect(mol, 3, 2048)\n sim = DataStructs.BulkTanimotoSimilarity(fps, data)\n return sim\n\n\n\n\n\n\nif __name__ == '__main__':\n\n mol = Chem.MolFromSmiles('NC(=O)c1ccccc1Nc1ccnc(Oc2ccccc2)c1')\n graph = mol_to_dgl(mol)\n\n\n env = Environement()\n\n env.current_graph = graph\n env.current_mol = mol\n for i in range(50):\n print(i)\n add_avalaible_atom = torch.nonzero(env.current_graph.ndata['n_feat'][:, -1], as_tuple=True)[0]\n del_avalaible_bond = torch.nonzero(env.current_graph.edata['e_feat'].argmax(dim=-1) == 0, as_tuple=True)[0]\n\n action_prob = [int(del_avalaible_bond.shape[0] > 0), int(add_avalaible_atom.shape[0] > 0)]\n if sum(action_prob)==0 :\n break\n action = sample_idx(action_prob)\n if action ==0 :\n\n del_idx = del_avalaible_bond.tolist()[sample_idx(len(del_avalaible_bond.tolist())*[1])]\n arm_idx = None\n add_idx = None\n print('check bond type')\n # for index in del_avalaible_bond :\n # u = env.current_graph.all_edges()[0][index].item()\n # v = env.current_graph.all_edges()[1][index].item()\n # print(env.current_mol.GetBondBetweenAtoms(u,v).GetBondType())\n elif action==1 :\n arm_idx = sample_idx([i for i in range(1000)])\n add_idx = add_avalaible_atom.tolist()[sample_idx(len(add_avalaible_atom.tolist())*[1])]\n del_idx = None\n\n 
action_dict = {\n 'act' : action,\n 'del' : del_idx,\n 'arm' : arm_idx,\n 'add' : add_idx\n }\n _, not_changed = env.next_step(action_dict)\n\n print('not changed ', not_changed)\n if not_changed :\n print(action_dict)\n print('nb atoms', len(env.current_mol.GetAtoms()))\n # if action == 0 :\n # u = env.current_graph.all_edges()[0][del_idx].item()\n # v = env.current_graph.all_edges()[1][del_idx].item()\n # type_bond = str(env.current_mol.GetBondBetweenAtoms(u,v).GetBondType())\n # path = 'mols/'+type_bond +'_mol_del_{}_{}.jpg'.format(u,v)\n # draw_mol(env.current_mol,path)\n # print(path)\n # print(action_prob)\n # print(del_idx)\n # print(del_avalaible_bond)\n # for index in del_avalaible_bond:\n # u = env.current_graph.all_edges()[0][index].item()\n # v = env.current_graph.all_edges()[1][index].item()\n # print(env.current_mol.GetBondBetweenAtoms(u, v).GetBondType())\n\n if action == 1:\n path = 'mols/mol_add_idx_{}_arm_idx_{}.jpg'.format(add_idx, arm_idx)\n # draw_mol(env.current_mol,path)\n print(path)\n print(env.current_graph.in_degrees())\n print(torch.nonzero(env.current_graph.out_degrees()==1, as_tuple =True))\n leaf = torch.nonzero(env.current_graph.out_degrees()==1, as_tuple =True)[0].unsqueeze(0).repeat(10,1)\n print(leaf)\n gr = env.current_graph.all_edges()[0][:10].unsqueeze(1).repeat(1,torch.nonzero(env.current_graph.out_degrees()==1, as_tuple =True)[0].shape[0])\n print(gr)\n print((gr==leaf).sum(dim=-1))\n # env.edit(graph,mol,action= , del_idx=, add_idx=, arm_idx=)\n\n\n # if __name__ == '__main__':\n # env = Environment_improve()\n # action = {\n # 'act' : 0,\n # 'del_idx' : 5,\n # 'add_idx' : 3,\n # 'arm_idx' : 6\n # }\n # mol, _ = env.init_mol()\n #\n # print(env.compute_prop_score(mol))\n # print()\n # print(env.scaffold_mol_dict['smiles'][env.index_scaffold])\n # # print(env.indice_init_mol, env.init_mol_from_ref)\n#\n# # with open('score.txt','w') as f:\n# # for i,smi in enumerate(env.ref_mol_dict['smiles']):\n# # text = smi\n# # mol = env.ref_mol_dict['mols'][i]\n# # gsk3 = get_scores('gsk3b',[mol])\n# # jnk3 = get_scores('jnk3', [mol])\n# # text += ','+str(gsk3)+','+str(jnk3)\n# # f.write(text+'\\n')\n# #\n# # with open('MARS/data/actives_jnk3.txt', 'r') as f :\n# # data = f.readlines()\n# # data = [smi[:-1] for smi in data[1:]]\n# # score = []\n# # smiles = []\n# # mols = []\n# # for d in data :\n# # smi, score_ = d.split(',')\n# # smiles.append(smi)\n# # score.append(score_)\n# # mols.append(Chem.MolFromSmiles(smi))\n# # scores = np.array(get_scores('jnk3',mols))>=0.5\n# # print(scores.mean())\n#\n# done = False\n# for t in range(100):\n#\n# done = False\n# env.reset()\n# c = 0\n# while done ==False and c<10:\n# c+=1\n# graph = env.current_graph\n# nb_edges = graph.all_edges()[0].shape[0]\n# nb_nodes = graph.number_of_nodes()\n#\n# act = sample_idx([0.5,0.5])\n# del_idx = sample_idx(nb_edges*[1/nb_edges])\n# p_add = np.array(nb_nodes*[1/nb_nodes])*(graph.ndata['n_feat'][:,-1]>0).numpy()\n# add_idx = sample_idx(p_add.tolist())\n# arm_idx = sample_idx(10*[1/10])\n# action = {\n# 'act' : 1,\n# 'del' : del_idx,\n# 'add' : add_idx,\n# 'arm' : arm_idx\n# }\n# state, reward, done = env.next_step(action)\n# print('imitation size ', len(env.imitation_dataset))\n#\n# # ucb_ref = np.array(env.ref_mol_dict['UCB'])\n# # print(ucb_ref[ucb_ref!=0])\n# # print(np.array(env.ref_mol_dict['N'])[np.array(env.ref_mol_dict['N'])!=0],'N')\n# # print(np.array(env.ref_mol_dict['R'])[np.array(env.ref_mol_dict['N'])!=0],'R')\n# # 
print(np.array(env.ref_mol_dict['N']).sum()+np.array(env.new_mol_dict['N']).sum())\n# # print(np.array(env.ref_mol_dict['R']).sum() + np.array(env.new_mol_dict['R']).sum())\n# # print(len(env.new_mol_dict['mols']))\n# # print(env.new_mol_dict['smiles'])\n# # print(env.new_mol_dict['UCB'])\n# # print(env.new_mol_dict)\n#\n# new_mol = Chem.MolFromSmiles('O=[SH](=O)c1ccccc1Nc1nc(Nc2ccccc2)ncc1Cl')\n# gsk3b_score = get_scores('gsk3b', [new_mol])[0]\n# jnk3_score = get_scores('jnk3', [new_mol])[0]\n# sa_score = get_score('sa', new_mol)\n# qed_score = get_score('qed', new_mol)\n# print(gsk3b_score)\n# print(jnk3_score)\n# print(sa_score)\n# print(qed_score)\n# print(min(gsk3b_score,0.6))\n\n\n\n\n\n","repo_name":"VincentH23/scaffold","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":15751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14024428169","text":"from __future__ import print_function\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases()\n#from builtins import str\nfrom builtins import range\nfrom quantities.quantity import Quantity\nimport sciunit\nfrom sciunit import Test,Score\ntry:\n from sciunit import ObservationError\nexcept:\n from sciunit.errors import ObservationError\nimport hippounit.capabilities as cap\nfrom sciunit.utils import assert_dimensionless# Converters.\nfrom sciunit.scores import BooleanScore,ZScore # Scores.\n\ntry:\n import numpy\nexcept:\n print(\"NumPy not loaded.\")\n\n#import matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n#from neuron import h\nimport collections\nimport efel\nimport os\nimport multiprocessing\nimport multiprocessing.pool\nimport functools\nimport math\nfrom scipy import stats\n\nfrom scipy.optimize import fsolve, curve_fit\nimport scipy.interpolate as interpolate\nfrom scipy.signal import find_peaks\n\nimport json\nfrom hippounit import plottools\nimport collections\n\n\ntry:\n import pickle as pickle\nexcept:\n import pickle\nimport gzip\n\ntry:\n import copy_reg\nexcept:\n import copyreg\n\nfrom types import MethodType\n\nfrom quantities import mV, nA, ms, V, s\n\nfrom hippounit import scores\nimport copy\n\n\ndef _pickle_method(method):\n func_name = method.__func__.__name__\n obj = method.__self__\n cls = method.__self__.__class__\n return _unpickle_method, (func_name, obj, cls)\n\ndef _unpickle_method(func_name, obj, cls):\n for cls in cls.mro():\n try:\n func = cls.__dict__[func_name]\n except KeyError:\n pass\n else:\n break\n return func.__get__(obj, cls)\n\n\nclass NonDaemonPool(multiprocessing.pool.Pool):\n def Process(self, *args, **kwds):\n proc = super(NonDaemonPool, self).Process(*args, **kwds)\n\n class NonDaemonProcess(proc.__class__):\n \"\"\"Monkey-patch process to ensure it is never daemonized\"\"\"\n\n @property\n def daemon(self):\n return False\n\n @daemon.setter\n def daemon(self, val):\n pass\n\n proc.__class__ = NonDaemonProcess\n\n return proc\n\ntry:\n copy_reg.pickle(MethodType, _pickle_method, _unpickle_method)\nexcept:\n copyreg.pickle(MethodType, _pickle_method, _unpickle_method)\n\n\nclass PathwayInteraction(Test):\n \"\"\" \"\"\"\n\n def __init__(self, config = {},\n observation = {},\n name=\"Pathway Interaction test\" ,\n force_run=False,\n force_run_adjust_syn_weight=False,\n base_directory= None,\n num_of_dend_locations = 15,\n random_seed = 1,\n show_plot=True,\n save_all = True,\n AMPA_weight_init = 0.000748,\n trunk_origin 
= None):\n\n        self.num_of_dend_locations = num_of_dend_locations\n        self.random_seed = random_seed\n\n        observation = self.format_data(observation)\n        observation = self.add_std_to_observation(observation)\n\n        Test.__init__(self, observation, name)\n\n        self.required_capabilities = (cap.ProvidesRandomDendriticLocations, cap.ProvidesRecordingLocationsOnTrunk, cap.ReceivesSynapse, cap.InitialiseModel, cap.ThetaSynapticStimuli, cap.RunSimulation_ReturnTraces, cap.NumOfPossibleLocations, cap.ReceivesSynapseGivenPathway, cap.ReceivesMultipleSquareCurrents)  # +=\n\n        self.force_run_adjust_syn_weight = force_run_adjust_syn_weight\n        self.force_run = force_run\n        self.show_plot = show_plot\n        self.save_all = save_all\n\n        self.base_directory = base_directory\n\n        self.path_figs = None  # added later, because the model name is needed\n        self.path_results = None\n        self.trunk_origin = trunk_origin\n\n        self.logFile = None\n        self.test_log_filename = 'test_log.txt'\n        self.message_to_logFile = ''\n\n        self.config = config\n\n        self.npool = multiprocessing.cpu_count() - 1\n\n        self.AMPA_weight_init = AMPA_weight_init\n\n        description = \"\"\n\n        score_type = scores.ZScore_PathwayInteraction\n\n    def format_data(self, observation):\n\n        for key, val in list(observation.items()):\n            for ke, va in list(val.items()):\n                for k, v in list(va.items()):\n                    try:\n                        assert type(observation[key][ke][k]) is Quantity\n                    except Exception as e:\n                        try:\n                            observation[key][ke][k] = float(v)  # plain numbers are converted in place\n                        except Exception as e:\n                            quantity_parts = v.split(\" \")\n                            number = float(quantity_parts[0])\n                            units = \" \".join(quantity_parts[1:])\n                            observation[key][ke][k] = Quantity(number, units)\n        return observation\n\n    def add_std_to_observation(self, observation):\n\n        for key, val in list(observation.items()):\n            for ke, va in list(val.items()):\n                observation[key][ke]['std'] = float(observation[key][ke]['sem'] * numpy.sqrt(observation[key][ke]['n'])) * observation[key][ke]['mean'].units\n        #print(observation)\n        return observation\n\n    def analyse_syn_traces(self, model, t, v, t_no_input, v_no_input):\n        if not numpy.array_equal(t, t_no_input):  # if the time vectors are not equal, the traces are resampled with a fixed time step\n            dt = 0.025\n            time_vector = numpy.arange(t[0], t[-1], dt)  # from the first to the last element of the original time vector\n\n            interp_trace = numpy.interp(time_vector, t, v)\n            interp_trace_no_input = numpy.interp(time_vector, t_no_input, v_no_input)\n\n            depol = interp_trace - interp_trace_no_input\n\n            #print(\"Voltage traces are resampled using linear interpolation\")\n\n        else:\n            depol = v - v_no_input\n            time_vector = t\n\n        max_depol = max(depol)\n\n        return max_depol\n\n\n    def synapse(self, model, t_no_input, v_no_input, weight, path_adjust_syn_weight, pathway, dend_loc0):\n        file_name = path_adjust_syn_weight + 'Trace_' + str(dend_loc0[0]) + '(' + str(dend_loc0[1]) + ')_' + 'weight_' + str(weight) + '.p'\n\n        if self.force_run_adjust_syn_weight or (os.path.isfile(file_name) is False):\n\n            t, v, v_dend = model.run_synapse_pathway_get_vm(dend_loc0, weight, pathway)\n            if self.save_all:\n                pickle.dump([t, v, v_dend], gzip.GzipFile(file_name, \"wb\"))\n\n        else:\n            [t, v, v_dend] = pickle.load(gzip.GzipFile(file_name, \"rb\"))\n\n        max_soma_depol = self.analyse_syn_traces(model, t, v, t_no_input, v_no_input)\n\n        return max_soma_depol\n\n    def adjust_syn_weight(self, model, dend_loc, pathway):\n\n        if self.base_directory:\n            path_adjust_syn_weight = self.base_directory + 'temp_data/' + 'pathway_interaction/' + model.name + '/adjust_syn_weight/'\n        else:\n
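            # (editor's note) like the other helpers in this test, intermediate results are cached\n            # as pickle files under .../temp_data/pathway_interaction/, and the\n            # force_run_adjust_syn_weight flag forces them to be recomputed instead of reloaded.\n            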
path_adjust_syn_weight = model.base_directory + 'temp_data/' + 'pathway_interaction/adjust_syn_weight/'\n\n try:\n if not os.path.exists(path_adjust_syn_weight) and self.save_all:\n os.makedirs(path_adjust_syn_weight)\n except OSError as e:\n if e.errno != 17:\n raise\n pass\n\n if pathway == 'SC':\n file_name = path_adjust_syn_weight + 'SC_weight.p'\n desired_somatic_depol = 0.2\n if pathway == 'PP':\n file_name = path_adjust_syn_weight + 'PP_weight.p'\n desired_somatic_depol = 0.2\n\n \n if self.force_run_adjust_syn_weight or (os.path.isfile(file_name) is False):\n\n file_name_no_input = path_adjust_syn_weight + 'Traces_no_input.p'\n\n if os.path.isfile(file_name_no_input) is False:\n\n pool_syn_ = multiprocessing.Pool(1, maxtasksperchild = 1) # I use multiprocessing to keep every NEURON related task in independent processes\n t_no_input, v_no_input, v_dend_no_input = pool_syn_.apply(model.run_synapse_pathway_get_vm, args = (dend_loc[0], 0.0, pathway))\n # plt.plot(t_no_input, v_no_input)\n # plt.show()\n pool_syn_.terminate()\n pool_syn_.join()\n del pool_syn_\n if self.save_all:\n pickle.dump([t_no_input, v_no_input, v_dend_no_input], gzip.GzipFile(file_name_no_input, \"wb\"))\n\n else:\n [t_no_input, v_no_input, v_dend_no_input] = pickle.load(gzip.GzipFile(file_name_no_input, \"rb\"))\n\n \n synapse_ = functools.partial(self.synapse, model, t_no_input, v_no_input, self.AMPA_weight_init, path_adjust_syn_weight, pathway)\n\n pool_syn = multiprocessing.Pool(self.npool, maxtasksperchild = 1) # I use multiprocessing to keep every NEURON related task in independent processes\n max_soma_depols = pool_syn.map(synapse_, dend_loc, chunksize=1)\n pool_syn.terminate()\n pool_syn.join()\n del pool_syn\n\n #print(\"before:\" , max_soma_depols)\n avg_max_soma_depols = numpy.mean(max_soma_depols)\n #print('avg before', avg_max_soma_depols)\n\n scale_factor = desired_somatic_depol / avg_max_soma_depols\n #print('scale_factor', scale_factor)\n\n synapse_ = functools.partial(self.synapse, model, t_no_input, v_no_input, self.AMPA_weight_init * scale_factor, path_adjust_syn_weight, pathway)\n\n pool_syn = multiprocessing.Pool(self.npool, maxtasksperchild = 1) # I use multiprocessing to keep every NEURON related task in independent processes\n max_soma_depols = pool_syn.map(synapse_, dend_loc, chunksize=1)\n pool_syn.terminate()\n pool_syn.join()\n del pool_syn\n\n #print(\"after:\" , max_soma_depols)\n avg_max_soma_depols = numpy.mean(max_soma_depols)\n #print('avg after', avg_max_soma_depols)\n\n AMPA_weight_final = self.AMPA_weight_init * scale_factor\n\n\n pickle.dump(AMPA_weight_final, gzip.GzipFile(file_name, \"wb\"))\n\n\n else:\n AMPA_weight_final = pickle.load(gzip.GzipFile(file_name, \"rb\"))\n\n\n return AMPA_weight_final\n\n def adjust_num_syn(self, model, SC_weight, PP_weight, recording_loc, stimuli_params, t_no_input_rec_dend, v_no_input_rec_dend, pathway):\n interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train = stimuli_params\n\n new_stimuli_params = [interval_bw_trains, interval_bw_stimuli_in_train, 1, num_stimuli_in_train] \n\n dist_range = [0,9999999999]\n random_seed = self.random_seed\n\n if self.base_directory:\n path = self.base_directory + 'temp_data/' + 'pathway_interaction/' + model.name + '/'\n else:\n path = model.base_directory + 'temp_data/' + 'pathway_interaction/'\n\n file_name = path + pathway + '_dendritic_locations.p'\n\n if self.force_run_adjust_syn_weight or (os.path.isfile(file_name) is False):\n\n if pathway == 'SC':\n\n 
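                # SC synapses are placed on the oblique dendrites (the 'PP' branch below uses the\n                # tuft); the search loop further down then adds or removes stimulated locations\n                # until the dendritic depolarization falls inside the experimental band\n                # (exp_depol +/- exp_depol_sd).\n                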
model.SecList_name = model.ObliqueSecList_name\n dend_loc, locations_distances = model.get_random_locations_multiproc(10, self.random_seed, dist_range, self.trunk_origin) # number of random locations , seed\n PP_dend_loc =[] \n num_of_loc = model.get_num_of_possible_locations()\n \n exp_depol = 16.0\n exp_depol_sd = 1.6\n\n # traces = self.theta_pathway_stimulus(model, SC_weight, PP_weight, dend_loc, PP_dend_loc, recording_loc, new_stimuli_params, 600, pathway, save_traces=False))\n\n\n elif pathway == 'PP':\n model.SecList_name = model.TuftSecList_name\n dend_loc, locations_distances = model.get_random_locations_multiproc(10, self.random_seed, dist_range, self.trunk_origin) # number of random locations , seed\n \n SC_dend_loc =[] \n num_of_loc = model.get_num_of_possible_locations()\n\n exp_depol = 10.2\n exp_depol_sd = 1.0\n\n # traces = self.theta_pathway_stimulus(model, SC_weight, PP_weight, SC_dend_loc, dend_loc, recording_loc, new_stimuli_params, 600, pathway, save_traces=False))\n\n\n # max_depol = self.analyse_syn_traces(model, traces[pathway]['t'], traces[pathway]['v_dend'], t_no_input_rec_dend, v_no_input_rec_dend)\n # plt.figure()\n # plt.plot(traces[pathway]['t'], traces[pathway]['v_dend'])\n # plt.show()\n # print(max_depol)\n\n found = False\n prev_max_depol = None\n\n #print(pathway, 'num_of_loc', num_of_loc)\n \n #\"\"\"\n if pathway == 'PP' or pathway == 'SC': #changing this I can play with which pathway to tune automatically and which not\n while not found and len(dend_loc) > 1 and len(dend_loc) < num_of_loc and len(dend_loc) < 50:\n\n random_seed += 1\n\n if prev_max_depol:\n prev_max_depol = max_depol # if it already has a value (we are not in the first iteration), it gets the value of the previous iteration \n\n pool = multiprocessing.Pool(1, maxtasksperchild = 1) # multiprocessing pool is used so that the model can be killed after the simulation, avoiding pickle errors\n \n if pathway == 'SC':\n traces = pool.apply(self.theta_pathway_stimulus, args = (model, SC_weight, PP_weight, dend_loc, PP_dend_loc, recording_loc, new_stimuli_params, 1600, 0, pathway, False)) # , save_traces=False because we don't want to save all the traces during adjustment\n\n elif pathway == 'PP':\n traces = pool.apply(self.theta_pathway_stimulus, args = (model, SC_weight, PP_weight, SC_dend_loc, dend_loc, recording_loc, new_stimuli_params, 1600, 0, pathway, False))\n\n pool.terminate()\n pool.join()\n del pool\n\n\n max_depol = self.analyse_syn_traces(model, traces[pathway]['t'], traces[pathway]['v_dend'], t_no_input_rec_dend, v_no_input_rec_dend)\n #print(pathway, ': ', max_depol)\n\n\n if not prev_max_depol: # if it has a value of None (we are in the first iteration), it gets the same value as the max_depol \n prev_max_depol = max_depol\n\n\n if max_depol < exp_depol - exp_depol_sd and prev_max_depol < exp_depol - exp_depol_sd:\n if pathway == 'SC':\n model.SecList_name = model.ObliqueSecList_name\n elif pathway == 'PP':\n model.SecList_name = model.TuftSecList_name\n \n prev_dend_loc = list(dend_loc)\n \n dend_loc_, locations_distances_ = model.get_random_locations_multiproc(1, random_seed, dist_range, self.trunk_origin) # select one more location\n\n while dend_loc_[0] in dend_loc and len(dend_loc) <= num_of_loc: \n random_seed += 1\n dend_loc_, locations_distances_ = model.get_random_locations_multiproc(1, random_seed, dist_range, self.trunk_origin) # select one more location\n dend_loc.append(dend_loc_[0]) \n #print(pathway, ': ', dend_loc)\n\n elif max_depol < exp_depol - exp_depol_sd 
and prev_max_depol > exp_depol + exp_depol_sd:\n #print(pathway, ' koztes1')\n \n #print('depols', max_depol, prev_max_depol)\n #print('dend_locs')\n #print(dend_loc)\n #print(prev_dend_loc)\n accepted_depol_diff= min(abs(max_depol-exp_depol), abs(prev_max_depol-exp_depol))\n if accepted_depol_diff == abs(prev_max_depol-exp_depol):\n dend_loc = prev_dend_loc\n found = True\n else:\n # dend_loc remains\n found = True \n #print('chosen dend_loc', dend_loc)\n\n elif max_depol > exp_depol + exp_depol_sd and prev_max_depol > exp_depol + exp_depol_sd:\n prev_dend_loc = list(dend_loc)\n\n dend_loc.pop() #removing last element\n #print(pathway, ': ', dend_loc)\n\n elif max_depol > exp_depol + exp_depol_sd and prev_max_depol < exp_depol - exp_depol_sd:\n #print(pathway, ' koztes2')\n\n #print('depols', max_depol, prev_max_depol)\n #print('dend_locs')\n #print(dend_loc)\n #print(prev_dend_loc)\n\n accepted_depol_diff= min(abs(max_depol-exp_depol), abs(prev_max_depol-exp_depol))\n if accepted_depol_diff == abs(prev_max_depol-exp_depol):\n dend_loc = list(prev_dend_loc)\n found = True\n else:\n # dend_loc remains\n found = True \n #print('chosen dend_loc', dend_loc)\n\n elif exp_depol - exp_depol_sd < max_depol < exp_depol + exp_depol_sd:\n\n found = True\n #print(pathway, ': ', dend_loc)\n \n \n\n\n if not found:\n print(\"The number of activated synapses could not be adjusted properly on pathway:\", pathway)\n print(\"Maximum depolarization achieved:\", max_depol, \"mV\")\n print(\"Stimulated dendritic locations:\", dend_loc)\n self.message_to_logFile += \"The number of activated synapses could not be adjusted properly on pathway: \" + pathway + \"\\n\" + \"Maximum depolarization achieved: \" + str(max_depol) + \" mV \\n\" + \"Stimulated dendritic locations :\" + str(dend_loc) + \"\\n\"\n \n #\"\"\"\n\n pathway_dend_locs = {pathway: dend_loc}\n print(\"final dend_loc \" + pathway + \" : \", dend_loc)\n pickle.dump(pathway_dend_locs, gzip.GzipFile(file_name, \"wb\"))\n\n else:\n \n pathway_dend_locs = pickle.load(gzip.GzipFile(file_name, \"rb\"))\n print(\"final dend_loc \", pathway_dend_locs)\n\n return pathway_dend_locs \n\n\n def generate_no_input_traces(self, model, recording_loc):\n\n\n if self.base_directory:\n path = self.base_directory + 'temp_data/' + 'pathway_interaction/' + model.name + '/'\n else:\n path = model.base_directory + 'temp_data/' + 'pathway_interaction/'\n\n file_name_no_input = path + 'traces_no_input.p'\n\n if self.force_run or (os.path.isfile(file_name_no_input) is False):\n model.initialise()\n t, v, v_dend,v_stim_locs = model.run_simulation([], recording_loc, 1600)\n\n if self.save_all:\n pickle.dump([t, v, v_dend], gzip.GzipFile(file_name_no_input, \"wb\"))\n\n else:\n [t, v, v_dend] = pickle.load(gzip.GzipFile(file_name_no_input, \"rb\"))\n\n return t, v, v_dend\n\n def spikecount(self, delay, duration, v_trace):\n\n efel.setThreshold(-25)\n\n trace = {}\n traces=[]\n trace['T'] = v_trace[0]\n trace['V'] = v_trace[1]\n trace['stim_start'] = [delay]\n trace['stim_end'] = [delay + duration]\n traces.append(trace)\n\n traces_results = efel.getFeatureValues(traces, ['Spikecount'])\n\n spikecount = traces_results[0]['Spikecount'][0]\n\n return spikecount\n\n def extract_efel_features(self, delay, duration, v_trace, features):\n\n efel.setThreshold(-25)\n\n trace = {}\n traces=[]\n trace['T'] = v_trace[0]\n trace['V'] = v_trace[1]\n trace['stim_start'] = [delay]\n trace['stim_end'] = [delay + duration]\n traces.append(trace)\n\n traces_results = efel.getFeatureValues(traces, 
features)\n\n return traces_results[0]\n\n def run_current_stim(self, model, path_adjust_current_amplitude, amplitude, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x):\n\n file_name = path_adjust_current_amplitude + 'dend_trace_' + str(amplitude) + '_nA.p'\n\n if os.path.isfile(file_name) is False:\n\n pool = multiprocessing.Pool(1, maxtasksperchild = 1) # multiprocessing pool is used so that the model can be killed after the simulation, avoiding pickle errors\n t, v = pool.apply(model.get_vm, args = (amplitude, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x))\n pool.terminate()\n pool.join()\n del pool\n \n if self.save_all:\n pickle.dump([t, v], gzip.GzipFile(file_name, \"wb\"))\n\n else:\n [t, v] = pickle.load(gzip.GzipFile(file_name, \"rb\"))\n\n return t, v\n\n def binsearch(self, model, path_adjust_current_amplitude, stim_range, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x, desired_depol,v_no_input):\n c_minmax = stim_range\n c_step_start = 0.01\n c_step_stop= 0.002\n\n found = False\n spikecounts = []\n amplitudes = []\n max_depols = []\n\n tolerance = 2.0\n\n print(\"DOING BINARY SEARCH\")\n\n while c_step_start >= c_step_stop and not found:\n\n c_stim = numpy.arange(c_minmax[0], c_minmax[1], c_step_start)\n #print('c_stim: ', c_stim)\n\n first = 0\n last = numpy.size(c_stim, axis=0)-1\n\n while first <= last and not found:\n\n midpoint = (first + last)//2\n amplitude = c_stim[midpoint]\n #print('INFO: ', first, c_stim[first], last, c_stim[last])\n\n result=[]\n\n t, v = self.run_current_stim(model, path_adjust_current_amplitude, amplitude, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x)\n\n depol_dend = v - v_no_input\n\n max_depol = numpy.max(depol_dend)\n spike_count = self.spikecount(delay, duration, [t, v])\n #print(\"amp: \", amplitude, \"depol: \", max_depol, \"spike count: \", spike_count)\n\n amplitudes.append(amplitude)\n spikecounts.append(spike_count)\n max_depols.append(max_depol)\n\n if spike_count == 0 and max_depol <= desired_depol + tolerance and max_depol >= desired_depol - tolerance:\n found = True\n else:\n if spike_count > 0 or (spike_count == 0 and max_depol > desired_depol + tolerance):\n last = midpoint-1\n elif spike_count == 0 and max_depol < desired_depol - tolerance:\n first = midpoint+1\n c_step_start=c_step_start/2\n\n if not found:\n amp_index = min((p for p in range(len(spikecounts)) if spikecounts[p] == 0), key=lambda i: abs(max_depols[i]-desired_depol)) # we choose the one that is nearest to the desired depol, but no APs\n amplitude = amplitudes[amp_index]\n spike_count = spikecounts[amp_index]\n max_depol = max_depols[amp_index]\n\n\n binsearch_result=[found, amplitude, max_depol, spike_count]\n #print(\"binsearch result: \", binsearch_result)\n\n return binsearch_result\n\n\n def adjust_current_amplitude(self, model, stimuli_list):\n\n amplitude, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x = stimuli_list\n\n if self.base_directory:\n path_adjust_current_amplitude = self.base_directory + 'temp_data/' + 'pathway_interaction/' + model.name + '/adjust_current_amplitude/'\n else:\n path_adjust_current_amplitude = model.base_directory + 'temp_data/' + 'pathway_interaction/adjust_current_amplitude/'\n\n\n try:\n if not os.path.exists(path_adjust_current_amplitude) and self.save_all:\n os.makedirs(path_adjust_current_amplitude)\n except OSError as e:\n if 
e.errno != 17:\n                raise\n            pass\n\n        desired_depol = 15.8  # mV\n\n        file_name_current_amp = path_adjust_current_amplitude + 'current_amp.p'\n\n        if self.force_run_adjust_syn_weight or (os.path.isfile(file_name_current_amp) is False):\n\n            t_no_input, v_no_input = self.run_current_stim(model, path_adjust_current_amplitude, 0.0, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x)\n\n            spike_count_no_input = self.spikecount(delay, duration, [t_no_input, v_no_input])\n\n            if spike_count_no_input > 0:\n                print(\"Cell fires spontaneously\")\n                current_amp_final = float('nan')\n\n            else:\n\n                amplitude = 0.25\n\n                t, v = self.run_current_stim(model, path_adjust_current_amplitude, amplitude, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x)\n\n                #depol_dend = v - v_no_input\n\n                #max_depol = numpy.max(depol_dend)\n                max_depol = self.analyse_syn_traces(model, t, v, t_no_input, v_no_input)\n                spike_count = self.spikecount(delay, duration, [t, v])\n                #print(\"amp: \", amplitude, \"depol: \", max_depol, \"spike count: \", spike_count)\n\n\n                while spike_count > 0:  # halve the amplitude until the cell no longer spikes\n\n                    amplitude = amplitude / 2.0\n\n                    t, v = self.run_current_stim(model, path_adjust_current_amplitude, amplitude, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x)\n\n                    #depol_dend = v - v_no_input\n\n                    #max_depol = numpy.max(depol_dend)\n                    max_depol = self.analyse_syn_traces(model, t, v, t_no_input, v_no_input)\n                    spike_count = self.spikecount(delay, duration, [t, v])\n                    #print(\"amp: \", amplitude, \"depol: \", max_depol, \"spike count: \", spike_count)\n\n\n                scale_factor = desired_depol / max_depol  # assumes a roughly linear amplitude-to-depolarization relation\n                #print(\"scale_factor: \", scale_factor)\n\n\n                amplitude = amplitude * scale_factor\n\n                t, v = self.run_current_stim(model, path_adjust_current_amplitude, amplitude, delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x)\n\n                #depol_dend = v - v_no_input\n\n                #max_depol = numpy.max(depol_dend)\n                max_depol = self.analyse_syn_traces(model, t, v, t_no_input, v_no_input)\n                spike_count = self.spikecount(delay, duration, [t, v])\n                #print(\"amp: \", amplitude, \"depol: \", max_depol, \"spike count: \", spike_count)\n\n\n                if spike_count == 0:\n                    current_amp_final = amplitude\n\n                    pickle.dump(current_amp_final, gzip.GzipFile(file_name_current_amp, \"wb\"))\n                else:\n                    binsearch_result = self.binsearch(model, path_adjust_current_amplitude, [0, amplitude], delay, duration, stim_section_name, stim_location_x, rec_section_name, rec_location_x, desired_depol, v_no_input)\n\n                    current_amp_final = binsearch_result[1]\n\n                    pickle.dump(current_amp_final, gzip.GzipFile(file_name_current_amp, \"wb\"))\n\n\n        else:\n            current_amp_final = pickle.load(gzip.GzipFile(file_name_current_amp, \"rb\"))\n\n        #print(\"current_amp_final: \", current_amp_final)\n\n        self.message_to_logFile += \"current_amp_final: \" + str(current_amp_final) + \"\n\"\n\n        return current_amp_final\n\n    def theta_pathway_stimulus(self, model, SC_weight, PP_weight, SC_dend_loc, PP_dend_loc, recording_loc, stimuli_params, tstop, depol_amp, pathway, save_traces):\n\n        \"\"\"Simulates pathway stimulation of the Schaffer-collateral or the Perforant Path, or both at the same time. The simultaneous activation of the 2 pathways is solved by calling the same Capability function but with different arguments (section list, synaptic parameters). 
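Each pathway attaches its synapses to its own section list (the oblique dendrites for the SC input, the tuft for the PP input), each with its own synaptic weight. 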
For this to be feasible, the model must be loaded first, and therefore separate capability methods are needed to (1) load the model, (2) define the synaptic stimulus and (3) to run the simulation and make the recordings. In other tests all of these were done through a single capability method.\"\"\"\n\n '''\n interval_bw_trains = 1/ self.config[\"frequency of stimulus sequence\"] * 1000\n interval_bw_stimuli_in_train = 1/ self.config[\"frequency of trains\"] * 1000\n num_trains = self.config[\"number of trains\"]\n num_stimuli_in_train = self.config[\"number of stimuli in a train\"]\n '''\n\n interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train = stimuli_params\n\n if self.base_directory:\n path = self.base_directory + 'temp_data/' + 'pathway_interaction/' + model.name + '/'\n else:\n path = model.base_directory + 'temp_data/' + 'pathway_interaction/'\n\n file_name = path + pathway + '_traces.p'\n\n if (self.force_run or (os.path.isfile(file_name) is False)) and save_traces:\n\n\n model.initialise() # should be solved more like using Capabilities (problem: to add synapses, model should be loaded, but can not be reloaded. We want to be able to add PP and SC stimulation separately. (Later different synaptic parameters, different delay etc) \n\n if pathway == 'SC':\n model.activate_theta_stimuli(SC_dend_loc, SC_weight, pathway, interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train)\n t, v, v_dend, v_stim_locs = model.run_simulation(SC_dend_loc, recording_loc, tstop)\n '''\n plt.figure()\n plt.plot(t,v)\n plt.plot(t,v_dend)\n plt.title('SC stimulus')\n '''\n elif pathway == 'PP':\n model.activate_theta_stimuli(PP_dend_loc, PP_weight, pathway, interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train)\n t, v, v_dend, v_stim_locs = model.run_simulation(PP_dend_loc, recording_loc, tstop)\n '''\n plt.figure()\n plt.plot(t,v)\n plt.plot(t,v_dend)\n plt.title('PP stimulus')\n '''\n elif pathway == 'SC+PP':\n model.activate_theta_stimuli(PP_dend_loc, PP_weight, 'PP', interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train)\n # model.activate_theta_stimuli(SC_dend_loc + PP_dend_loc, PP_weight, 'PP') \n model.activate_theta_stimuli(SC_dend_loc, SC_weight, 'SC', interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train)\n t, v, v_dend, v_stim_locs = model.run_simulation(SC_dend_loc + PP_dend_loc, recording_loc, tstop)\n '''\n plt.figure()\n plt.plot(t,v)\n plt.plot(t,v_dend)\n plt.title('SC+PP stimulus')\n '''\n # plt.show()\n\n elif pathway == 'depol':\n (rec_ndend, xloc), distance = recording_loc\n model.activate_current_stimuli(depol_amp, model.start, num_stimuli_in_train * interval_bw_stimuli_in_train, num_trains, interval_bw_trains, rec_ndend, xloc)\n t, v, v_dend, v_stim_locs = model.run_simulation([], recording_loc, tstop)\n\n elif pathway == 'PP+depol':\n (rec_ndend, xloc), distance = recording_loc\n model.activate_theta_stimuli(PP_dend_loc, PP_weight, 'PP', interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train)\n model.activate_current_stimuli(depol_amp, model.start, num_stimuli_in_train * interval_bw_stimuli_in_train, num_trains, interval_bw_trains, rec_ndend, xloc)\n t, v, v_dend, v_stim_locs = model.run_simulation(PP_dend_loc, recording_loc, tstop)\n\n elif pathway == 'SC+depol':\n (rec_ndend, xloc), distance = recording_loc\n model.activate_theta_stimuli(SC_dend_loc, SC_weight, 'SC', interval_bw_trains, 
interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train)\n model.activate_current_stimuli(depol_amp, model.start, num_stimuli_in_train * interval_bw_stimuli_in_train, num_trains, interval_bw_trains, rec_ndend, xloc)\n t, v, v_dend, v_stim_locs = model.run_simulation(SC_dend_loc, recording_loc, tstop)\n\n traces = {pathway: {'t' : t, 'v_soma' : v, 'v_dend' : v_dend, 'v_stim_locs' : v_stim_locs}} \n\n if self.save_all:\n pickle.dump(traces, gzip.GzipFile(file_name, \"wb\"))\n\n\n elif save_traces is False:\n\n model.initialise() # should be solved more like using Capabilities (problem: to add synapses, model should be loaded, but can not be reloaded. We want to be able to add PP and SC stimulation separately. (Later different synaptic parameters, different delay etc) \n\n if pathway == 'SC':\n model.activate_theta_stimuli(SC_dend_loc, SC_weight, pathway, interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train)\n t, v, v_dend, v_stim_locs = model.run_simulation(SC_dend_loc, recording_loc, tstop)\n\n elif pathway == 'PP':\n model.activate_theta_stimuli(PP_dend_loc, PP_weight, pathway, interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train)\n t, v, v_dend, v_stim_locs = model.run_simulation(PP_dend_loc, recording_loc, tstop)\n\n traces = {pathway: {'t' : t, 'v_soma' : v, 'v_dend' : v_dend, 'v_stim_locs' : v_stim_locs}} \n\n\n elif (self.force_run is False and (os.path.isfile(file_name))) and save_traces:\n\n traces = pickle.load(gzip.GzipFile(file_name, \"rb\"))\n\n return traces\n\n def plot_traces(self, model, traces_dict):\n \n \n if self.base_directory:\n self.path_figs = self.base_directory + 'figs/' + 'pathway_interaction/' + model.name + '/'\n else:\n self.path_figs = model.base_directory + 'figs/' + 'pathway_interaction/'\n\n try:\n if not os.path.exists(self.path_figs) and self.save_all:\n os.makedirs(self.path_figs)\n except OSError as e:\n if e.errno != 17:\n raise\n pass\n\n print(\"The figures are saved in the directory: \", self.path_figs)\n \n fig= plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n\n ax1.plot(traces_dict['SC']['t'], traces_dict['SC']['v_soma'], label = 'soma')\n ax1.plot(traces_dict['SC']['t'], traces_dict['SC']['v_dend'], label = 'distal dendrite')\n ax1.set_xlabel('Time (ms)')\n ax1.set_ylabel('Voltage (mV)')\n ax1.title.set_text('SC stimulus')\n\n ax2.plot(traces_dict['PP']['t'], traces_dict['PP']['v_soma'])\n ax2.plot(traces_dict['PP']['t'], traces_dict['PP']['v_dend'])\n ax2.set_xlabel('Time (ms)')\n ax2.set_ylabel('Voltage (mV)')\n ax2.title.set_text('PP stimulus')\n\n ax3.plot(traces_dict['SC+PP']['t'], traces_dict['SC+PP']['v_soma'])\n ax3.plot(traces_dict['SC+PP']['t'], traces_dict['SC+PP']['v_dend'])\n ax3.set_xlabel('Time (ms)')\n ax3.set_ylabel('Voltage (mV)')\n ax3.title.set_text('SC+PP stimulus')\n \n fig.subplots_adjust(wspace = 0.5, hspace = 0.6)\n handles, labels = ax1.get_legend_handles_labels()\n lgd=fig.legend(handles, labels, bbox_to_anchor=(1.0, 1.0), loc = 'upper left')\n plt.savefig(self.path_figs + 'trace_subplots', bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n plt.figure()\n plt.plot(traces_dict['SC']['t'], traces_dict['SC']['v_soma'], label = 'soma')\n plt.plot(traces_dict['SC']['t'], traces_dict['SC']['v_dend'], label = 'distal dendrite')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (mV)')\n plt.title('SC stimulus')\n lgd=plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')\n plt.savefig(self.path_figs + 
'trace_SC_stimulus', bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n plt.figure()\n plt.plot(traces_dict['PP']['t'], traces_dict['PP']['v_soma'], label = 'soma')\n plt.plot(traces_dict['PP']['t'], traces_dict['PP']['v_dend'], label = 'distal dendrite')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (mV)')\n plt.title('PP stimulus')\n lgd=plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')\n plt.savefig(self.path_figs + 'trace_PP_stimulus', bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n plt.figure()\n plt.plot(traces_dict['SC+PP']['t'], traces_dict['SC+PP']['v_soma'], label = 'soma')\n plt.plot(traces_dict['SC+PP']['t'], traces_dict['SC+PP']['v_dend'], label = 'distal dendrite')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (mV)')\n plt.title('SC+PP stimulus')\n lgd=plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')\n plt.savefig(self.path_figs + 'trace_SC_PP_stimulus', bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n plt.figure()\n plt.plot(traces_dict['SC+depol']['t'], traces_dict['SC+depol']['v_soma'], label = 'soma')\n plt.plot(traces_dict['SC+depol']['t'], traces_dict['SC+depol']['v_dend'], label = 'distal dendrite')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (mV)')\n plt.title('SC stimulus + depolarization')\n lgd=plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')\n plt.savefig(self.path_figs + 'trace_SC_stimulus+depol', bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n plt.figure()\n plt.plot(traces_dict['PP+depol']['t'], traces_dict['PP+depol']['v_soma'], label = 'soma')\n plt.plot(traces_dict['PP+depol']['t'], traces_dict['PP+depol']['v_dend'], label = 'distal dendrite')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (mV)')\n plt.title('PP stimulus + depolarization')\n lgd=plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')\n plt.savefig(self.path_figs + 'trace_PP_stimulus+depol', bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n plt.figure()\n plt.plot(traces_dict['depol']['t'], traces_dict['depol']['v_soma'], label = 'soma')\n plt.plot(traces_dict['depol']['t'], traces_dict['depol']['v_dend'], label = 'distal dendrite')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (mV)')\n plt.title('depolarization')\n lgd=plt.legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')\n plt.savefig(self.path_figs + 'trace_only_depol', bbox_extra_artists=(lgd,), bbox_inches='tight')\n \n ncols = 3\n nrows = int(numpy.ceil(len(list(traces_dict['SC']['v_stim_locs'].keys()))/float(ncols)))\n fig2, axs2 = plt.subplots(nrows, ncols, figsize=(ncols*4, nrows*4))\n fig2.subplots_adjust(wspace = 0.4, hspace = 0.5)\n axs2=axs2.flatten()\n for i, (key, value) in enumerate(traces_dict['SC']['v_stim_locs'].items()):\n axs2[i].plot(traces_dict['SC']['t'], value)\n axs2[i].set_title(str(key))\n axs2[i].set_xlabel('Time (ms)')\n axs2[i].set_ylabel('Voltage (mV)')\n fig2.suptitle('SC stimulus')\n plt.savefig(self.path_figs + 'local_traces_SC_stimulus', bbox_inches='tight')\n\t\n ncols = 3\n nrows = int(numpy.ceil(len(list(traces_dict['PP']['v_stim_locs'].keys()))/float(ncols)))\n fig3, axs3 = plt.subplots(nrows, ncols, figsize=(ncols*4, nrows*4))\n fig3.subplots_adjust(wspace = 0.4, hspace = 0.5)\n axs3=axs3.flatten()\n for i, (key, value) in enumerate(traces_dict['PP']['v_stim_locs'].items()):\n axs3[i].plot(traces_dict['PP']['t'], value)\n axs3[i].set_title(str(key))\n axs3[i].set_xlabel('Time (ms)')\n axs3[i].set_ylabel('Voltage (mV)')\n fig3.suptitle('PP stimulus')\n plt.savefig(self.path_figs + 'local_traces_PP_stimulus', bbox_inches='tight')\n\t\n ncols = 3\n 
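        # one subplot per stimulated location: nrows = ceil(n_locations / ncols), so e.g.\n        # 8 locations with ncols = 3 give a 3 x 3 grid with the last axis left unused\n        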
nrows = int(numpy.ceil(len(list(traces_dict['SC+PP']['v_stim_locs'].keys())) / float(ncols)))\n        fig4, axs4 = plt.subplots(nrows, ncols, figsize=(ncols*4, nrows*4))\n        fig4.subplots_adjust(wspace = 0.4, hspace = 0.5)\n        axs4 = axs4.flatten()\n        for i, (key, value) in enumerate(traces_dict['SC+PP']['v_stim_locs'].items()):\n            axs4[i].plot(traces_dict['SC+PP']['t'], value)\n            axs4[i].set_title(str(key))\n            axs4[i].set_xlabel('Time (ms)')\n            axs4[i].set_ylabel('Voltage (mV)')\n        fig4.suptitle('SC+PP stimulus')\n        plt.savefig(self.path_figs + 'local_traces_SC_PP_stimulus', bbox_inches='tight')\n\n        ncols = 3\n        nrows = int(numpy.ceil(len(list(traces_dict['SC+depol']['v_stim_locs'].keys())) / float(ncols)))\n        fig5, axs5 = plt.subplots(nrows, ncols, figsize=(ncols*4, nrows*4))\n        fig5.subplots_adjust(wspace = 0.4, hspace = 0.5)\n        axs5 = axs5.flatten()\n        for i, (key, value) in enumerate(traces_dict['SC+depol']['v_stim_locs'].items()):\n            axs5[i].plot(traces_dict['SC+depol']['t'], value)\n            axs5[i].set_title(str(key))\n            axs5[i].set_xlabel('Time (ms)')\n            axs5[i].set_ylabel('Voltage (mV)')\n        fig5.suptitle('SC stimulus + depolarization')\n        plt.savefig(self.path_figs + 'local_traces_SC_stimulus+depol', bbox_inches='tight')\n\n        ncols = 3\n        nrows = int(numpy.ceil(len(list(traces_dict['PP+depol']['v_stim_locs'].keys())) / float(ncols)))\n        fig6, axs6 = plt.subplots(nrows, ncols, figsize=(ncols*4, nrows*4))\n        fig6.subplots_adjust(wspace = 0.4, hspace = 0.5)\n        axs6 = axs6.flatten()\n        for i, (key, value) in enumerate(traces_dict['PP+depol']['v_stim_locs'].items()):\n            axs6[i].plot(traces_dict['PP+depol']['t'], value)\n            axs6[i].set_title(str(key))\n            axs6[i].set_xlabel('Time (ms)')\n            axs6[i].set_ylabel('Voltage (mV)')\n        fig6.suptitle('PP stimulus + depolarization')\n        plt.savefig(self.path_figs + 'local_traces_PP_stimulus+depol', bbox_inches='tight')\n\n\n    def extract_plateau_features(self, model, traces, traces_no_input, stimuli_params, pathway):\n\n        interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train = stimuli_params\n\n\n        time = traces['t']\n        v_dend = traces['v_dend']\n        v_soma = traces['v_soma']\n\n        t_no_input = traces_no_input[0]\n        v_dend_no_input = traces_no_input[2]\n        v_soma_no_input = traces_no_input[1]  # [t, v_soma, v_dend]\n\n        if not numpy.array_equal(time, t_no_input):  # if the time vectors are not equal, the traces are resampled with a fixed time step\n            dt = 0.025\n            time_vector = numpy.arange(time[0], time[-1], dt)  # from the first to the last element of the original time vector\n\n            interp_trace = numpy.interp(time_vector, time, v_dend)\n            interp_trace_no_input = numpy.interp(time_vector, t_no_input, v_dend_no_input)\n\n            depol_dend = interp_trace - interp_trace_no_input\n            time = time_vector\n        else:\n\n            depol_dend = v_dend - v_dend_no_input\n\n        ''' Remove points with a high derivative and points around each peak, then interpolate the remaining points to get the plateau without the bAPs '''\n\n        dt = numpy.diff(time)\n        dV = numpy.diff(depol_dend)\n\n        deriv_dend = dV / dt\n\n        indices_to_keep = numpy.where((abs(deriv_dend) < 1))[0]\n\n        peaks_ind, _ = find_peaks(depol_dend)\n\n        peaks_v = depol_dend\n\n        #print(peaks_ind)\n\n\n        for i in peaks_ind:\n\n            indices_to_keep = numpy.setdiff1d(indices_to_keep, numpy.where((time >= time[i] - 0.2) & (time <= time[i] + 0.2))[0])  # remove peaks and points 0.2 ms around the peaks\n\n\n        depol_dend_plateau = depol_dend[indices_to_keep]\n\n        time_plateau = time[indices_to_keep]\n\n        interp_depol_dend_plateau = numpy.interp(time, time_plateau, depol_dend_plateau)\n\n\n
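        # (editor's note) the block above isolates the slow plateau envelope: samples with\n        # |dV/dt| >= 1 mV/ms are discarded, a +/-0.2 ms window around every detected peak is\n        # removed as well, and the surviving points are linearly re-interpolated onto the\n        # original time grid, which strips the fast bAP transients from the trace.\n        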
dV_interp_depol_dend_plateau=numpy.diff(interp_depol_dend_plateau)\n deriv_interp_depol_dend_plateau = dV_interp_depol_dend_plateau/dt\n\n\n ''' start figure'''\n plt.figure()\n\n stim_start = model.start\n\n start_indices = []\n stop_indices = []\n plateau_amplitudes = []\n plateau_durations = []\n for i in range(num_trains):\n start = stim_start + i * interval_bw_trains\n start_index = numpy.where(time >= start)[0][0]\n stop = start + interval_bw_trains\n stop_index = numpy.where(time >= stop)[0][0]\n start_indices.append(start_index)\n stop_indices.append(stop_index)\n\n ''' extract plateau duration and plateau amplitude'''\n\n amplitude = numpy.max(interp_depol_dend_plateau[start_index:stop_index])\n plateau_amplitudes.append(amplitude)\n\n half_amplitude = amplitude/2.0 \n\n\n cross_idx = numpy.argwhere(numpy.diff(numpy.sign(interp_depol_dend_plateau[start_index:stop_index] - half_amplitude))).flatten() # First the interp_depol_dend_plateau[start_index:stop_index] - half_amplitude and the corresponding signs are calculated using numpy.sign; numpy.diff gives the positions, where the sign changes (e.g. the lines cross); numpy.argwhere gives the indices.\n\n #print(cross_idx)\n\n plt.plot(time[start_index:stop_index][cross_idx], interp_depol_dend_plateau[start_index:stop_index][cross_idx], 'oc')\n\n\n durs = []\n if len(cross_idx) >1:\n for j in range(0, len(cross_idx), 2):\n #print(j)\n if j+1 < len(cross_idx):\n if numpy.sign(deriv_interp_depol_dend_plateau[start_index:stop_index][cross_idx])[j] > 0 and numpy.sign(deriv_interp_depol_dend_plateau[start_index:stop_index][cross_idx])[j+1] < 0:\n duration = time[start_index:stop_index][cross_idx][j+1] - time[start_index:stop_index][cross_idx][j]\n durs.append(duration)\n #print(durs)\n if len(durs) > 0:\n plt.plot([time[start_index:stop_index][cross_idx][numpy.argmax(durs)*2], time[start_index:stop_index][cross_idx][numpy.argmax(durs)*2 +1]], [interp_depol_dend_plateau[start_index:stop_index][cross_idx][numpy.argmax(durs)*2], interp_depol_dend_plateau[start_index:stop_index][cross_idx][numpy.argmax(durs)*2+1]], 'o-g')\n plateau_durations.append(numpy.max(durs))\n else:\n plateau_durations.append(float('nan'))\n else:\n plateau_durations.append(float('nan'))\n \n\n #print('plateau amplitudes: ', plateau_amplitudes)\n #print('plateau durations: ', plateau_durations)\n\n ''' plot info'''\n plt.plot(time, depol_dend, color='orange')\n #plt.plot(time_plateau, depol_dend_plateau, 'm*')\n #plt.plot(time[:-1], deriv_dend, color='black')\n #plt.plot(time[peaks_ind], depol_dend[peaks_ind], 'go')\n plt.plot(time, interp_depol_dend_plateau, color='blue')\n plt.plot(time[start_indices], interp_depol_dend_plateau[start_indices], 'or')\n plt.plot(time[stop_indices], interp_depol_dend_plateau[stop_indices], 'ok')\n plt.title(pathway + ' - Distal dendrite')\n plt.savefig(self.path_figs + pathway + '_plateau_half_dur', bbox_inches='tight')\n\n for i, plateau_dur in enumerate(plateau_durations): # if there is no interpretable plateau, we neither interpret its amplitude\n if numpy.isnan(plateau_dur):\n plateau_amplitudes[i] = float('nan')\n\n\n return plateau_amplitudes, plateau_durations\n\n\n def extract_features(self, model, traces, traces_no_input, stimuli_params, pathway):\n\n #print(pathway)\n\n interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train = stimuli_params\n\n time = traces['t']\n v_dend = traces['v_dend']\n v_soma = traces['v_soma']\n\n start_indices = []\n stop_indices = []\n plateau_amplitudes = []\n 
plateau_durations = []\n\n num_APs_list = []\n ISIs_list = []\n bAP_amp_list = []\n\n stim_start = model.start\n\n for i in range(num_trains):\n start = stim_start + i * interval_bw_trains\n start_index = numpy.where(time >= start)[0][0]\n stop = start + interval_bw_trains\n stop_index = numpy.where(time >= stop)[0][0]\n start_indices.append(start_index)\n stop_indices.append(stop_index)\n\n ''' extract number of APs, bAP amp, soma ISI'''\n\n efel_results = self.extract_efel_features(0, time[stop_index] - time[start_index], [time[start_index:stop_index], v_dend[start_index:stop_index]], ['Spikecount', 'AP_amplitude', 'AP_begin_voltage', 'peak_voltage'])\n #num_APs = spikecount(0, time[stop_index] - time[start_index], [time[start_index:stop_index], v_dend[start_index:stop_index]])\n #print(efel_results)\n if pathway == 'SC' or pathway == 'PP' or pathway == 'SC+depol' or pathway == 'PP+depol':\n num_APs = efel_results['Spikecount'][0]\n num_APs_list.append(num_APs)\n #print('num APs: ', num_APs)\n \n if pathway == 'SC+depol' or pathway == 'PP+depol':\n try: # sometimes eFEL finds less AP_begin_voltage than peak_voltage\n bAP_amp = efel_results['peak_voltage'] - efel_results['AP_begin_voltage'] #efel_results['AP_amplitude'] somehow this often gives empty array\n except:\n bAP_amp = float('nan') \n bAP_amp_list.append(numpy.mean(bAP_amp))\n #print('bAP amp', bAP_amp)\n \n\n if pathway == 'SC+PP':\n efel_results_soma = self.extract_efel_features(0, time[stop_index] - time[start_index], [time[start_index:stop_index], v_soma[start_index:stop_index]], ['ISI_values'])\n ISI = efel_results_soma['ISI_values']\n if ISI is None:\n ISI = []\n #print('ISI: ', ISI)\n ISIs_list.append(numpy.mean(ISI))\n #print('ISIs_list: ', ISIs_list)\n\n\n if pathway == 'SC' or pathway == 'PP':\n features = {pathway: {'num AP' : {'mean' : numpy.mean(num_APs_list), 'std' : numpy.std(num_APs_list)}}}\n\n if pathway == 'SC+depol':\n #later this should get a bAP_amp feature\n plateau_amplitudes, plateau_durations = self.extract_plateau_features(model, traces, traces_no_input, stimuli_params, pathway)\n features = {pathway: {'num AP' : {'mean' : numpy.mean(num_APs_list), 'std' : numpy.std(num_APs_list)}, 'bAP amp' : {'mean' : numpy.mean(bAP_amp_list) * mV, 'std' : numpy.std(bAP_amp_list) * mV}, '1st plateau duration' : {'mean' : numpy.nanmean(plateau_durations[0]) * ms, 'std' : numpy.nanstd(plateau_durations[0]) * ms}, '3-5th plateau duration' : {'mean' : numpy.nanmean(plateau_durations[2:]) * ms, 'std' : numpy.nanstd(plateau_durations[2:]) * ms}}}\n\n if pathway == 'PP+depol':\n #later this should get a bAP_amp feature\n plateau_amplitudes, plateau_durations = self.extract_plateau_features(model, traces, traces_no_input, stimuli_params, pathway)\n features = {pathway: {'num AP' : {'mean' : numpy.mean(num_APs_list), 'std' : numpy.std(num_APs_list)}, 'bAP amp' : {'mean' : numpy.mean(bAP_amp_list) * mV, 'std' : numpy.std(bAP_amp_list) * mV}, 'plateau duration' : {'mean' : numpy.nanmean(plateau_durations) * ms, 'std' : numpy.nanstd(plateau_durations) * ms}, '1st plateau duration' : {'mean' : numpy.nanmean(plateau_durations[0]) * ms, 'std' : numpy.nanstd(plateau_durations[0]) * ms}, '3-5th plateau duration' : {'mean' : numpy.nanmean(plateau_durations[2:]) * ms, 'std' : numpy.nanstd(plateau_durations[2:]) * ms}}}\n\n if pathway == 'SC+PP':\n plateau_amplitudes, plateau_durations = self.extract_plateau_features(model, traces, traces_no_input, stimuli_params, pathway)\n features = {pathway: {'plateau amplitude' : {'mean' : 
numpy.nanmean(plateau_amplitudes) * mV, 'std' : numpy.nanstd(plateau_amplitudes) * mV}, 'plateau duration' : {'mean' : numpy.nanmean(plateau_durations) * ms, 'std' : numpy.nanstd(plateau_durations) * ms}, '1st plateau duration' : {'mean' : numpy.nanmean(plateau_durations[0]) * ms, 'std' : numpy.nanstd(plateau_durations[0]) * ms}, '3-5th plateau duration' : {'mean' : numpy.nanmean(plateau_durations[2:]) * ms, 'std' : numpy.nanstd(plateau_durations[2:]) * ms}, 'somatic AP ISI' : {'mean' : numpy.mean(ISIs_list) * ms, 'std' : numpy.std(ISIs_list)* ms}}}\n #print('plateau features: ', plateau_amplitudes, plateau_durations)\n\n #print(features)\n\n return features\n\n\n def plot_features(self, prediction):\n \n observation = self.observation\n '''\n feat_means = []\n feat_stds = []\n labels = []\n plt.figure()\n for pathway, feats in prediction.items():\n for feat, v in feats.items():\n feat_means.append(v['mean'])\n feat_stds.append(v['std'])\n labels.append(pathway + ' - ' + feat)\n\n y = range(len(feat_means))\n\n plt.errorbar(feat_means, y, xerr=feat_stds, linestyle='none', marker='o', color='blue')\n plt.yticks(y, labels)\n plt.savefig(self.path_figs + 'feature_values', bbox_inches='tight')\n '''\n\n model_num_AP_means = []\n model_plateau_duration_means = []\n model_plateau_amplitude_means = []\n model_ISI_means = []\n model_bAP_amp_means = []\n model_1st_plateau_duration_means = []\n model_3_5th_plateau_duration_means = []\n \n model_num_AP_stds = []\n model_plateau_duration_stds = []\n model_plateau_amplitude_stds = []\n model_ISI_stds = []\n model_bAP_amp_stds = []\n model_1st_plateau_duration_stds = []\n model_3_5th_plateau_duration_stds = []\n\n exp_num_AP_means = []\n exp_plateau_duration_means = []\n exp_plateau_amplitude_means = []\n exp_ISI_means = []\n exp_bAP_amp_means = []\n exp_1st_plateau_duration_means = []\n exp_3_5th_plateau_duration_means = [] \n\n exp_num_AP_stds = []\n exp_plateau_duration_stds = []\n exp_plateau_amplitude_stds = []\n exp_ISI_stds = []\n exp_bAP_amp_stds = []\n exp_1st_plateau_duration_stds = []\n exp_3_5th_plateau_duration_stds = []\n\n labels_num_AP = []\n labels_plateau_duration = []\n labels_plateau_amplitude = []\n labels_ISI = []\n labels_bAP_amp = []\n labels_1st_plateau_duration = []\n labels_3_5th_plateau_duration = []\n \n\n for pathway, feats in prediction.items():\n if 'num AP' in list(feats.keys()):\n model_num_AP_means.append(prediction[pathway]['num AP']['mean'])\n model_num_AP_stds.append(prediction[pathway]['num AP']['std'])\n\n exp_num_AP_means.append(observation[pathway]['num AP']['mean'])\n exp_num_AP_stds.append(observation[pathway]['num AP']['std'])\n\n labels_num_AP.append(pathway + ' - ' + 'num AP')\n \n if 'bAP amp' in list(feats.keys()):\n model_bAP_amp_means.append(prediction[pathway]['bAP amp']['mean'])\n model_bAP_amp_stds.append(prediction[pathway]['bAP amp']['std'])\n\n exp_bAP_amp_means.append(observation[pathway]['bAP amp']['mean'])\n exp_bAP_amp_stds.append(observation[pathway]['bAP amp']['std'])\n\n labels_bAP_amp.append(pathway + ' - ' + 'bAP amp') \n\n if 'plateau duration' in list(feats.keys()):\n model_plateau_duration_means.append(prediction[pathway]['plateau duration']['mean'])\n model_plateau_duration_stds.append(prediction[pathway]['plateau duration']['std'])\n\n exp_plateau_duration_means.append(observation[pathway]['plateau duration']['mean'])\n exp_plateau_duration_stds.append(observation[pathway]['plateau duration']['std'])\n\n labels_plateau_duration.append(pathway + ' - ' + 'plateau duration')\n \n if 
'1st plateau duration' in list(feats.keys()):\n model_1st_plateau_duration_means.append(prediction[pathway]['1st plateau duration']['mean'])\n model_1st_plateau_duration_stds.append(prediction[pathway]['1st plateau duration']['std'])\n\n exp_1st_plateau_duration_means.append(observation[pathway]['1st plateau duration']['mean'])\n exp_1st_plateau_duration_stds.append(observation[pathway]['1st plateau duration']['std'])\n\n labels_1st_plateau_duration.append(pathway + ' - ' + '1st plateau duration')\n \n if '3-5th plateau duration' in list(feats.keys()):\n model_3_5th_plateau_duration_means.append(prediction[pathway]['3-5th plateau duration']['mean'])\n model_3_5th_plateau_duration_stds.append(prediction[pathway]['3-5th plateau duration']['std'])\n\n exp_3_5th_plateau_duration_means.append(observation[pathway]['3-5th plateau duration']['mean'])\n exp_3_5th_plateau_duration_stds.append(observation[pathway]['3-5th plateau duration']['std'])\n\n labels_3_5th_plateau_duration.append(pathway + ' - ' + '3-5th plateau duration')\n\n if 'plateau amplitude' in list(feats.keys()):\n model_plateau_amplitude_means.append(prediction[pathway]['plateau amplitude']['mean'])\n model_plateau_amplitude_stds.append(prediction[pathway]['plateau amplitude']['std'])\n\n exp_plateau_amplitude_means.append(observation[pathway]['plateau amplitude']['mean'])\n exp_plateau_amplitude_stds.append(observation[pathway]['plateau amplitude']['std'])\n\n labels_plateau_amplitude.append(pathway + ' - ' + 'plateau amplitude')\n\n if 'somatic AP ISI' in list(feats.keys()):\n model_ISI_means.append(prediction[pathway]['somatic AP ISI']['mean'])\n model_ISI_stds.append(prediction[pathway]['somatic AP ISI']['std'])\n\n exp_ISI_means.append(observation[pathway]['somatic AP ISI']['mean'])\n exp_ISI_stds.append(observation[pathway]['somatic AP ISI']['std'])\n\n labels_ISI.append(pathway + ' - ' + 'somatic AP ISI')\n \n model_all_plateau_duration_means = model_plateau_duration_means + model_1st_plateau_duration_means + model_3_5th_plateau_duration_means\n model_all_plateau_duration_stds = model_plateau_duration_stds + model_1st_plateau_duration_stds + model_3_5th_plateau_duration_stds\n exp_all_plateau_duration_means = exp_plateau_duration_means + exp_1st_plateau_duration_means + exp_3_5th_plateau_duration_means\n exp_all_plateau_duration_stds = exp_plateau_duration_stds + exp_1st_plateau_duration_stds + exp_3_5th_plateau_duration_stds\n labels_all_plateau_duration = labels_plateau_duration + labels_1st_plateau_duration + labels_3_5th_plateau_duration\n \n fig, axs = plt.subplots(3,2, figsize=(2*4, 2*4))\n plt.subplots_adjust(wspace = 0.5, hspace = 0.8)\n\n axs[0,0].errorbar(range(len(labels_num_AP)), model_num_AP_means, yerr=model_num_AP_stds, linestyle='none', marker='o', color='blue')\n axs[0,0].errorbar(range(len(labels_num_AP)), exp_num_AP_means, yerr=exp_num_AP_stds, linestyle='none', marker='o', color='red')\n axs[0,0].set_xticks(range(len(labels_num_AP)))\n axs[0,0].set_xticklabels(labels_num_AP, rotation = 20)\n axs[0,0].set_ylabel('# APs')\n\n axs[0,1].errorbar(range(len(labels_ISI)), model_ISI_means, yerr=model_ISI_stds, linestyle='none', marker='o', color='blue', label = 'model')\n axs[0,1].errorbar(range(len(labels_ISI)), exp_ISI_means, yerr=exp_ISI_stds, linestyle='none', marker='o', color='red', label = 'experiment')\n axs[0,1].set_xticks(range(len(labels_ISI)))\n axs[0,1].set_xticklabels(labels_ISI, rotation = 20)\n axs[0,1].set_ylabel('Somatic AP ISI (ms)')\n \n axs[1,0].errorbar(range(len(labels_plateau_amplitude)), 
model_plateau_amplitude_means, yerr=model_plateau_amplitude_stds, linestyle='none', marker='o', color='blue')\n axs[1,0].errorbar(range(len(labels_plateau_amplitude)), exp_plateau_amplitude_means, yerr=exp_plateau_amplitude_stds, linestyle='none', marker='o', color='red')\n axs[1,0].set_xticks(range(len(labels_plateau_amplitude)))\n axs[1,0].set_xticklabels(labels_plateau_amplitude, rotation = 20)\n axs[1,0].set_ylabel('Plateau amplitude (mV)')\n \n '''\n axs[1,1].errorbar(range(len(labels_plateau_duration)), model_plateau_duration_means, yerr=model_plateau_duration_stds, linestyle='none', marker='o', color='blue')\n axs[1,1].errorbar(range(len(labels_plateau_duration)), exp_plateau_duration_means, yerr=exp_plateau_duration_stds, linestyle='none', marker='o', color='red')\n axs[1,1].set_xticks(range(len(labels_plateau_duration)))\n axs[1,1].set_xticklabels(labels_plateau_duration, rotation = 20)\n axs[1,1].set_ylabel('Plateau duration (ms)')\n '''\n\n axs[1,1].errorbar(range(len(labels_all_plateau_duration)), model_all_plateau_duration_means, yerr=model_all_plateau_duration_stds, linestyle='none', marker='o', color='blue')\n axs[1,1].errorbar(range(len(labels_all_plateau_duration)), exp_all_plateau_duration_means, yerr=exp_all_plateau_duration_stds, linestyle='none', marker='o', color='red')\n axs[1,1].set_xticks(range(len(labels_all_plateau_duration)))\n axs[1,1].set_xticklabels(labels_all_plateau_duration, rotation = 90)\n axs[1,1].set_ylabel('Plateau duration (ms)')\n \n axs[2,0].errorbar(range(len(labels_bAP_amp)), model_bAP_amp_means, yerr=model_bAP_amp_stds, linestyle='none', marker='o', color='blue')\n axs[2,0].errorbar(range(len(labels_bAP_amp)), exp_bAP_amp_means, yerr=exp_bAP_amp_stds, linestyle='none', marker='o', color='red')\n axs[2,0].set_xticks(range(len(labels_bAP_amp)))\n axs[2,0].set_xticklabels(labels_bAP_amp, rotation = 20)\n axs[2,0].set_ylabel('bAP amplitude (mV)')\n \n axs[2,1].set_axis_off()\n \n\n lgd=axs[0,1].legend(bbox_to_anchor=(1.0, 1.0), loc = 'upper left')\n\n fig.suptitle('Feature values')\n\n if self.save_all:\n plt.savefig(self.path_figs + 'feature_values', bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n\n def plot_errors(self, errors):\n\n feat_errors = []\n labels = []\n plt.figure()\n for pathway, feats in errors.items():\n for feat, v in feats.items():\n feat_errors.append(v)\n labels.append(pathway + ' - ' + feat)\n\n y = range(len(feat_errors))\n\n plt.plot(feat_errors, y, linestyle='none', marker='o', color='blue')\n plt.yticks(y, labels)\n plt.title('Feature errors')\n plt.xlabel('# SDs')\n plt.savefig(self.path_figs + 'feature_errors', bbox_inches='tight')\n\n\n def validate_observation(self, observation):\n pass\n\n\n def generate_prediction(self, model, verbose=False):\n \"\"\"Implementation of sciunit.Test.generate_prediction.\"\"\"\n \n model.start = 400\n\n efel.reset()\n plt.close('all') #needed to avoid overlapping of saved images when the test is run on multiple models in a for loop\n\n if self.base_directory:\n self.path_results = self.base_directory + 'results/' + 'pathway_interaction/' + model.name + '/'\n else:\n self.path_results = model.base_directory + 'results/' + 'pathway_interaction/'\n\n try:\n if not os.path.exists(self.path_results):\n os.makedirs(self.path_results)\n except OSError as e:\n if e.errno != 17:\n raise\n pass\n\n dist_range = [0,9999999999]\n\n model.SecList_name = model.ObliqueSecList_name\n SC_dend_loc, SC_locations_distances = model.get_random_locations_multiproc(self.num_of_dend_locations, 
self.random_seed, dist_range, self.trunk_origin) # number of random locations , seed\n\n model.SecList_name = model.TuftSecList_name\n PP_dend_loc, PP_locations_distances = model.get_random_locations_multiproc(self.num_of_dend_locations, self.random_seed, dist_range, self.trunk_origin) # number of random locations , seed\n\n \"\"\"Finding recording location on Trunk whose distance is closest to 300 um\"\"\"\n distances = [self.config[\"distance of recording location\"]]\n tolerance = self.config[\"distance tolerance\"]\n\n rec_locs, rec_locs_actual_distances = model.find_trunk_locations_multiproc(distances, tolerance, self.trunk_origin)\n #print(\"recording locs\", rec_locs, rec_locs_actual_distances)\n\n # recording_loc = min(rec_locs_actual_distances, key=abs(distances[0] - rec_locs_actual_distances.get))\n recording_loc = min(rec_locs_actual_distances.items(), key=lambda kv : abs(kv[1] - distances[0]))\n #print(recording_loc, type(recording_loc))\n\n\n if not model.AMPA_name:\n print('')\n print('The built in Exp2Syn is used as the AMPA component. Tau1 =', model.AMPA_tau1, ',Tau2 =', model.AMPA_tau2 , '.')\n print('')\n if not model.NMDA_name: \n print('')\n print('The default NMDA model of HippoUnit is used with Jahr, Stevens voltage dependence.')\n print('')\n\n print(\"Adjusting synaptic weights ...\")\n\n SC_weight = self.adjust_syn_weight(model, SC_dend_loc, pathway = 'SC') #0.000748\n print('SC AMPA weight', SC_weight)\n self.message_to_logFile += \"SC AMPA weight: \" + str(SC_weight) + \"\\n\"\n\n PP_weight = self.adjust_syn_weight(model, PP_dend_loc, pathway = 'PP') #0.000748\n print('PP AMPA weight', PP_weight)\n self.message_to_logFile += \"PP AMPA weight: \" + str(PP_weight) + \"\\n\"\n \n pool = multiprocessing.Pool(1, maxtasksperchild = 1)\n t_no_input_rec_dend, v_soma_no_input, v_no_input_rec_dend = pool.apply(self.generate_no_input_traces, (model, recording_loc,)) # this is run in multiprocessing pool so that the model can be completely killed after done \n pool.terminate()\n pool.join()\n del pool\n\n interval_bw_trains = 1/ self.config[\"frequency of stimulus sequence\"] * 1000\n interval_bw_stimuli_in_train = 1/ self.config[\"frequency of trains\"] * 1000\n num_trains = self.config[\"number of trains\"]\n num_stimuli_in_train = self.config[\"number of stimuli in a train\"]\n\n\n stimuli_params =[interval_bw_trains, interval_bw_stimuli_in_train, num_trains, num_stimuli_in_train] \n\n # self.adjust_num_syn(model, SC_weight, PP_weight, recording_loc, stimuli_params, t_no_input_rec_dend, v_no_input_rec_dend, 'SC')\n \n pool = NonDaemonPool(self.npool, maxtasksperchild=1) # NoDeamonPool is needed because Random locations are needed to be chosen several times, for which the model is loaded in a multiprocessing pool \n adjust_num_syn_= functools.partial(self.adjust_num_syn, model, SC_weight, PP_weight, recording_loc, stimuli_params, t_no_input_rec_dend, v_no_input_rec_dend)\n dend_locs = pool.map(adjust_num_syn_, ['SC', 'PP'], chunksize=1)\n\n pool.terminate()\n pool.join()\n del pool\n\n dend_locs_dict = {} \n for locs in dend_locs:\n dend_locs_dict.update(locs)\n \n (rec_ndend, xloc), distance = recording_loc\n stimuli_list = [0.25, 400, num_stimuli_in_train * interval_bw_stimuli_in_train, rec_ndend, xloc, rec_ndend, xloc]\n\n current_amp = self.adjust_current_amplitude(model, stimuli_list)\n \n SC_dend_loc = dend_locs_dict['SC'] #[['dendrite[52]', 0.1], ['dendrite[112]', 0.3], ['dendrite[107]', 0.5], ['dendrite[54]', 0.7], ['dendrite[90]', 0.07142857142857142], 
['dendrite[84]', 0.9], ['dendrite[98]', 0.7], ['dendrite[107]', 0.9285714285714286]] #dend_locs_dict['SC'] \n        PP_dend_loc = dend_locs_dict['PP'] #[['dendrite[119]', 0.5], ['dendrite[155]', 0.5], ['dendrite[152]', 0.7], ['dendrite[130]', 0.5]] #dend_locs_dict['PP']\n\n        self.message_to_logFile += \"SC dend_loc: \" + str(SC_dend_loc) + \"\\n\"\n        self.message_to_logFile += \"PP dend_loc: \" + str(PP_dend_loc) + \"\\n\"\n\n        tstop = 1600\n\n        pool = multiprocessing.Pool(self.npool, maxtasksperchild=1)\n        theta_pathway_stimulus_= functools.partial(self.theta_pathway_stimulus, model, SC_weight, PP_weight, SC_dend_loc, PP_dend_loc, recording_loc, stimuli_params, tstop, current_amp, save_traces=True) # save_traces=True because we want to save traces into pickle files for later use\n        traces = pool.map(theta_pathway_stimulus_, ['SC', 'PP', 'SC+PP', 'depol', 'SC+depol', 'PP+depol'], chunksize=1)\n\n        pool.terminate()\n        pool.join()\n        del pool\n\n        traces_dict = {} \n        for trace in traces:\n            traces_dict.update(trace)\n        # print(traces_dict)\n\n\n        self.plot_traces(model, traces_dict)\n\n        print('Extracting features')\n\n        prediction = {}\n        for pathway, traces in traces_dict.items():\n            if pathway != 'depol':\n                features = self.extract_features(model, traces, [t_no_input_rec_dend, v_soma_no_input, v_no_input_rec_dend], stimuli_params, pathway)\n                prediction.update(features)\n        #print(prediction)\n\n\n        ''' printing to logFile'''\n\n        filepath = self.path_results + self.test_log_filename\n        self.logFile = open(filepath, 'w') # if it is opened before multiprocessing, the multiprocessing won't work under python3\n\n\n        if not model.AMPA_name:\n            self.logFile.write('The built in Exp2Syn is used as the AMPA component. Tau1 = ' + str(model.AMPA_tau1) + ', Tau2 = ' + str(model.AMPA_tau2) + '.\\n')\n            self.logFile.write(\"---------------------------------------------------------------------------------------------------\\n\")\n        if not model.NMDA_name:\n            self.logFile.write('The default NMDA model of HippoUnit is used with Jahr, Stevens voltage dependence.\\n')\n            self.logFile.write(\"---------------------------------------------------------------------------------------------------\\n\")\n\n        self.logFile.write(self.message_to_logFile)\n        self.message_to_logFile = ''\n\n\n        prediction_json = copy.deepcopy(prediction)\n\n        for key, val in list(prediction.items()):\n            for ke, va in list(val.items()):\n                for k, v in list(va.items()):\n                    try:\n                        v = str(v)\n                        quantity_parts = v.split(\"*\")\n                        prediction_json[key][ke][k] = \" \".join(quantity_parts)\n                    except:\n                        prediction_json[key][ke][k] = str(v)\n\n\n\n        file_name_json = self.path_results + 'pathway_interaction_model_features.json'\n\n        json.dump(prediction_json, open(file_name_json, \"w\"), indent=4)\n\n        self.plot_features(prediction)\n\n\n\n        print(\"Results are saved in the directory: \", self.path_results)\n\n        efel.reset()\n\n        return prediction\n\n    def compute_score(self, observation, prediction, verbose=False):\n        \"\"\"Implementation of sciunit.Test.score_prediction.\"\"\"\n\n\n        score, errors, penalty_PP_depol, penalty_SC_PP = scores.ZScore_PathwayInteraction.compute(observation,prediction)\n\n        score=scores.ZScore_PathwayInteraction(score)\n\n\n\n        file_name_errors = self.path_results + 'pathway_interaction_errors.json'\n        json.dump(errors, open(file_name_errors, \"w\"), indent=4)\n\n\n        if self.show_plot:\n            plt.show()\n\n        final_score={'score' : str(score)}\n        file_name_score= self.path_results + 'final_score.json'\n        json.dump(final_score, open(file_name_score, \"w\"), indent=4)\n\n        if 
penalty_PP_depol > 0:\n self.logFile.write(\"---------------------------------------------------------------------------------------------------\\n\")\n self.logFile.write('PP + depolarization stimulus didn\\'t generate interpretable plateau potential. A penalty (100) is added to the final score. Please have a look at the traces. \\n')\n self.logFile.write(\"---------------------------------------------------------------------------------------------------\\n\")\n\n if penalty_SC_PP > 0:\n self.logFile.write(\"---------------------------------------------------------------------------------------------------\\n\")\n self.logFile.write('SC+PP stimulus didn\\'t generate interpretable plateau potential. A penalty (100) is added to the final score. Please have a look at the traces. \\n')\n self.logFile.write(\"---------------------------------------------------------------------------------------------------\\n\")\n\n self.logFile.write(\"---------------------------------------------------------------------------------------------------\\n\")\n self.logFile.write(str(score)+'\\n')\n self.logFile.write(\"---------------------------------------------------------------------------------------------------\\n\")\n\n self.logFile.close()\n\n self.logFile = self.path_results + self.test_log_filename\n \n self.plot_errors(errors)\n\n return score\n\n def bind_score(self, score, model, observation, prediction):\n\n if self.path_figs is not None:\n score.related_data[\"figures\"] = [self.path_figs + 'feature_values.png', self.path_figs + 'feature_errors.png',\n self.path_figs + 'SC+PP_plateau_half_dur.png', self.path_figs + 'PP+depol_plateau_half_dur.png',\n self.path_figs + 'local_traces_SC_stimulus+depol.png', self.path_figs + 'local_traces_PP_stimulus+depol.png',\n self.path_figs + 'local_traces_SC_PP_stimulus.png', self.path_figs + 'local_traces_PP_stimulus.png',\n self.path_figs + 'local_traces_SC_stimulus.png', self.path_figs + 'trace_SC_stimulus+depol.png',\n self.path_figs + 'trace_SC_stimulus.png', self.path_figs + 'trace_SC_PP_stimulus.png',\n self.path_figs + 'trace_PP_stimulus+depol.png', self.path_figs + 'trace_PP_stimulus.png',\n self.path_figs + 'trace_only_depol.png', self.path_figs + 'trace_subplots.png']\n score.related_data[\"results\"] = [self.path_results + 'pathway_interaction_model_features.json', self.path_results + 'pathway_interaction_errors.json',\n self.path_results + 'final_score.json', self.path_results + 'test_log.txt']\n return score\n","repo_name":"KaliLab/hippounit","sub_path":"hippounit/tests/test_PathwayInteraction.py","file_name":"test_PathwayInteraction.py","file_ext":"py","file_size_in_byte":75879,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"31406551810","text":"#!/usr/bin/env python\n\nimport psycopg2\nimport psycopg2.extras\nimport argparse\nimport json\nimport os\nimport zipfile\nimport paramiko\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\n\n\ndef options():\n parser = argparse.ArgumentParser(description='Retrieve data from a LemnaTec database.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-c\", \"--config\", help=\"JSON config file.\", required=True)\n parser.add_argument(\"-o\", \"--outdir\", help=\"Output directory for results.\", required=True)\n args = parser.parse_args()\n\n if os.path.exists(args.outdir):\n raise IOError(\"The directory {0} already exists!\".format(args.outdir))\n\n return args\n\n\ndef main():\n # Read user options\n args = 
options()\n\n    # Read the database connection configuration file\n    config = open(args.config, 'rU')\n    # Load the JSON configuration data\n    db = json.load(config)\n\n    # SSH connection\n    ssh = paramiko.SSHClient()\n    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n    ssh.connect(db['hostname'], username='root', password=db['password'])\n    sftp = ssh.open_sftp()\n\n    # Make the output directory\n    os.mkdir(args.outdir)\n\n    # Create the SnapshotInfo.csv file\n    csv = open(os.path.join(args.outdir, \"SnapshotInfo.csv\"), \"w\")\n\n    # Connect to the LemnaTec database\n    conn = psycopg2.connect(host=db['hostname'], user=db['username'], password=db['password'], database=db['database'])\n    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n    # Get all snapshots\n    snapshots = {}\n    cur.execute(\"SELECT * FROM snapshot WHERE measurement_label = %s;\", [db['experiment']])\n    for row in cur:\n        snapshots[row['id']] = row\n\n    # Get all image metadata\n    images = {}\n    raw_images = {}\n    cur.execute(\"SELECT * FROM snapshot INNER JOIN tiled_image ON snapshot.id = tiled_image.snapshot_id INNER JOIN \"\n                \"tile ON tiled_image.id = tile.tiled_image_id\")\n    for row in cur:\n        if row['snapshot_id'] in snapshots:\n            image_name = row['camera_label'] + '_' + str(row['tiled_image_id']) + '_' + str(row['frame'])\n            if row['snapshot_id'] in images:\n                images[row['snapshot_id']].append(image_name)\n            else:\n                images[row['snapshot_id']] = [image_name]\n            raw_images[image_name] = {'raw_image_oid': row['raw_image_oid'],\n                                      'rotate_flip_type': row['rotate_flip_type'], 'dataformat': row['dataformat']}\n\n    # Create SnapshotInfo.csv file\n    header = ['experiment', 'id', 'plant barcode', 'car tag', 'timestamp', 'weight before', 'weight after',\n              'water amount', 'completed', 'measurement label', 'tag', 'tiles']\n    csv.write(','.join(map(str, header)) + '\\n')\n\n    # Stats\n    total_snapshots = len(snapshots)\n    total_water_jobs = 0\n    total_images = 0\n\n    for snapshot_id in tqdm(snapshots.keys()):\n        # Reformat the completed field\n        # if snapshots[snapshot_id]['completed'] == 't':\n        #     snapshots[snapshot_id]['completed'] = 'true'\n        # else:\n        #     snapshots[snapshot_id]['completed'] = 'false'\n\n        # Group all the output metadata\n        snapshot = snapshots[snapshot_id]\n        values = [db['experiment'], snapshot['id'], snapshot['id_tag'], snapshot['car_tag'],\n                  snapshot['time_stamp'].strftime('%Y-%m-%d %H:%M:%S'), snapshot['weight_before'],\n                  snapshot['weight_after'], snapshot['water_amount'], snapshot['completed'],\n                  snapshot['measurement_label'], '']\n\n        # If the snapshot also contains images, add them to the output\n        if snapshot_id in images:\n            values.append(';'.join(map(str, images[snapshot_id])))\n            total_images += len(images[snapshot_id])\n            # Create the local directory\n            snapshot_dir = os.path.join(args.outdir, \"snapshot\" + str(snapshot_id))\n            os.mkdir(snapshot_dir)\n\n            for image in images[snapshot_id]:\n                # Copy the raw image to the local directory\n                remote_dir = os.path.join(\"/data/pgftp\", db['database'],\n                                          snapshot['time_stamp'].strftime(\"%Y-%m-%d\"),\n                                          \"blob\" + str(raw_images[image]['raw_image_oid']))\n                local_file = os.path.join(snapshot_dir, \"blob\" + str(raw_images[image]['raw_image_oid']))\n                try:\n                    sftp.get(remote_dir, local_file)\n                except IOError as e:\n                    print(\"I/O error({0}): {1}. 
Offending file: {2}\".format(e.errno, e.strerror, remote_dir))\n\n if os.path.exists(local_file):\n # Is the file a zip file?\n if zipfile.is_zipfile(local_file):\n zf = zipfile.ZipFile(local_file)\n zff = zf.open(\"data\")\n img_str = zff.read()\n\n if 'VIS' in image or 'vis' in image:\n if len(img_str) == db['vis_height'] * db['vis_width']:\n raw = np.fromstring(img_str, dtype=np.uint8, count=db['vis_height']*db['vis_width'])\n raw_img = raw.reshape((db['vis_height'], db['vis_width']))\n img = cv2.cvtColor(raw_img, cv2.COLOR_BAYER_RG2BGR)\n if raw_images[image]['rotate_flip_type'] != 0:\n img = rotate_image(img)\n cv2.imwrite(os.path.join(snapshot_dir, image + \".png\"), img)\n os.remove(local_file)\n else:\n print(\"Warning: File {0} containing image {1} seems corrupted.\".format(local_file,\n image))\n elif 'NIR' in image or 'nir' in image:\n raw_rescale = None\n if raw_images[image]['dataformat'] == 4:\n # New NIR camera data format (16-bit)\n if len(img_str) == (db['nir_height'] * db['nir_width']) * 2:\n raw = np.fromstring(img_str, dtype=np.uint16,\n count=db['nir_height'] * db['nir_width'])\n if np.max(raw) > 4096:\n print(\"Warning: max value for image {0} is greater than 4096.\".format(image))\n raw_rescale = np.multiply(raw, 16)\n else:\n print(\"Warning: File {0} containing image {1} seems corrupted.\".format(local_file,\n image))\n elif raw_images[image]['dataformat'] == 0:\n # Old NIR camera data format (8-bit)\n if len(img_str) == (db['nir_height'] * db['nir_width']):\n raw_rescale = np.fromstring(img_str, dtype=np.uint8,\n count=db['nir_height'] * db['nir_width'])\n else:\n print(\"Warning: File {0} containing image {1} seems corrupted.\".format(local_file,\n image))\n if raw_rescale is not None:\n raw_img = raw_rescale.reshape((db['nir_height'], db['nir_width']))\n if raw_images[image]['rotate_flip_type'] != 0:\n raw_img = rotate_image(raw_img)\n cv2.imwrite(os.path.join(snapshot_dir, image + \".png\"), raw_img)\n os.remove(local_file)\n else:\n raw = np.fromstring(img_str, dtype=np.uint16, count=db['psII_height'] * db['psII_width'])\n if np.max(raw) > 16384:\n print(\"Warning: max value for image {0} is greater than 16384.\".format(image))\n raw_rescale = np.multiply(raw, 4)\n raw_img = raw_rescale.reshape((db['psII_height'], db['psII_width']))\n if raw_images[image]['rotate_flip_type'] != 0:\n raw_img = rotate_image(raw_img)\n cv2.imwrite(os.path.join(snapshot_dir, image + \".png\"), raw_img)\n os.remove(local_file)\n zff.close()\n zf.close()\n # os.remove(local_file)\n else:\n print(\"Warning: the local file {0} containing image {1} is not a proper zip file.\".format(\n local_file, image))\n else:\n print(\"Warning: the local file {0} containing image {1} was not copied correctly.\".format(\n local_file, image))\n else:\n values.append('')\n total_water_jobs += 1\n\n csv.write(','.join(map(str, values)) + '\\n')\n\n cur.close()\n conn.close()\n sftp.close()\n ssh.close()\n\n print(\"Total snapshots = \" + str(total_snapshots))\n print(\"Total water jobs = \" + str(total_water_jobs))\n print(\"Total images = \" + str(total_images))\n\n\ndef rotate_image(img):\n \"\"\"Rotate an image 180 degrees\n\n :param img: ndarray\n :return img: ndarray\n \"\"\"\n # Flip vertically\n img = cv2.flip(img, 1)\n # Flip horizontally\n img = cv2.flip(img, 0)\n\n return img\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"danforthcenter/data-science-tools","sub_path":"LT-db-extractor.py","file_name":"LT-db-extractor.py","file_ext":"py","file_size_in_byte":10018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"44986405990","text":"\"\"\"\nMethods to build stocks and list of stocks\n\"\"\"\n\nfrom pydantic.error_wrappers import ValidationError\nfrom brfundamentus.models.stock import Stock\nfrom brfundamentus.utils.utils import parse_str_to_float\nimport math\n\n\ndef build_single_stock(\n    info: list[str], headers: list[str], market_risk: float\n) -> Stock:\n    \"\"\"\n    Build a Stock from a list of strings\n    \"\"\"\n\n    map_info = {\n        parameter.strip(): info[idx] for idx, parameter in enumerate(headers)\n    }\n\n    ticker = map_info['TICKER']\n\n    # basic indicators\n    price = parse_str_to_float(map_info['PRECO'])\n    dy = parse_str_to_float(map_info['DY'], 100)\n    price_per_profit = parse_str_to_float(map_info['P/L'])\n    price_to_book = parse_str_to_float(map_info['P/VP'])\n    gross_margin = parse_str_to_float(map_info['MARGEM BRUTA'], 100)\n    net_margin = parse_str_to_float(map_info['MARG. LIQUIDA'], 100)\n    ebit_margin = parse_str_to_float(map_info['MARGEM EBIT'], 100)\n    ev_per_ebit = parse_str_to_float(map_info['EV/EBIT'])\n    current_liquidity = parse_str_to_float(map_info['LIQ. CORRENTE'])\n    net_debt_to_equity = parse_str_to_float(map_info['DIV. LIQ. / PATRI.'])\n    roe = parse_str_to_float(map_info['ROE'], 100)\n    roa = parse_str_to_float(map_info['ROA'])\n    roic = parse_str_to_float(map_info['ROIC'], 100)\n    cagr = parse_str_to_float(map_info['CAGR LUCROS 5 ANOS'], 100)\n    advt = parse_str_to_float(map_info['LIQUIDEZ MEDIA DIARIA'], 1000000)\n    bvps = parse_str_to_float(map_info['VPA'])\n    eps = parse_str_to_float(map_info['LPA'])\n    book_value = parse_str_to_float(map_info['VALOR DE MERCADO'], 1000000000)\n\n    # extra indicators\n    dps = dy * price if dy is not None else None\n    payout = (\n        dps / eps if dps is not None and eps is not None and eps != 0 else None\n    )\n\n    if roe is None or payout is None:\n        expected_growth = None\n    else:\n        expected_growth = (1 - payout) * roe if payout != 0 else 0.2 * roe\n\n    average_growth = (\n        expected_growth\n        if cagr is None or expected_growth is None\n        else (expected_growth + cagr) / 2\n    )\n\n    if (\n        price_per_profit is None\n        or average_growth is None\n        or average_growth == 0\n    ):\n        peg = None\n    else:\n        peg = price_per_profit / average_growth\n\n    # valuations\n\n    fair_price_graham = (\n        None\n        if eps is None\n        or bvps is None\n        or eps * bvps < 0\n        or eps < 0\n        or price is None\n        or price == 0\n        else math.sqrt(22.5 * eps * bvps)\n    )\n    graham_valuation = (\n        None if fair_price_graham is None else (fair_price_graham / price) - 1\n    )\n\n    fair_price_bazin = (\n        None if dps is None or price is None or price == 0 else dps / 0.06\n    )\n    bazin_valuation = (\n        None if fair_price_bazin is None else (fair_price_bazin / price) - 1\n    )\n\n    fair_price_gordon = (\n        None\n        if cagr is None or dps is None or price is None or price == 0\n        else (1 / market_risk) * dps * (1 + 0.1 * cagr)\n    )\n    gordon_valuation = (\n        None if fair_price_gordon is None else (fair_price_gordon / price) - 1\n    )\n\n    stock = Stock(\n        ticker=ticker,\n        price=price,\n        dy=dy,\n        roe=roe,\n        roic=roic,\n        roa=roa,\n        eps=eps,\n        price_to_book=price_to_book,\n        gross_margin=gross_margin,\n        net_margin=net_margin,\n        ebit_margin=ebit_margin,\n        current_liquidity=current_liquidity,\n        net_debt_to_equity=net_debt_to_equity,\n        ev_per_ebit=ev_per_ebit,\n        bvps=bvps,\n        
price_per_profit=price_per_profit,\n        cagr=cagr,\n        adtv=advt,\n        book_value=book_value,\n        dps=dps,\n        payout=payout,\n        expected_growth=expected_growth,\n        average_growth=average_growth,\n        peg=peg,\n        fair_price_graham=fair_price_graham,\n        fair_price_bazin=fair_price_bazin,\n        fair_price_gordon=fair_price_gordon,\n        graham_valuation=graham_valuation,\n        bazin_valuation=bazin_valuation,\n        gordon_valuation=gordon_valuation,\n    )\n\n    return stock\n\n\ndef build_list_of_stocks(\n    csv_info: list[str], headers: list[str], market_risk: float\n):\n    \"\"\"\n    Build a list of Stock from the info of a csv file\n    \"\"\"\n\n    stocks = list()\n    for line in csv_info:\n        splited = line.split(';')\n        try:\n            stock = build_single_stock(splited, headers, market_risk)\n        except (ValidationError, IndexError):\n            continue\n        stocks.append(stock)\n\n    return stocks\n","repo_name":"renanmath/stock_analysis","sub_path":"brfundamentus/builders/stock_builder.py","file_name":"stock_builder.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11337720548","text":"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import stats\r\nimport os\r\nfrom pylab import *\r\nimport scipy.interpolate as spi\r\n# 3-sigma rule\r\ndef method_1(data):\r\n    data = data['displacement']\r\n    u = data.mean()\r\n    std = data.std()\r\n    stats.kstest(data, 'norm', (u, std))\r\n    print('Mean:%.3f,Std:%.3f' % (u, std))\r\n\r\n    fig = plt.figure(figsize=(10, 6))\r\n    ax1 = fig.add_subplot(3, 1, 1)\r\n    data.plot(kind='kde', grid=True, style='-k', title=name + 'displacement curve')\r\n    plt.axvline(3 * std, color='r', linestyle=\"--\", alpha=0.8)\r\n    plt.axvline(-3 * std, color='r', linestyle=\"--\", alpha=0.8)\r\n    # draw the data density curve\r\n    print(name)\r\n    error = data[np.abs(data - u) > 3 * std]\r\n    print(error)\r\n    data_c = data[np.abs(data - u) <= 3 * std]\r\n    # new_x = np.arange(-np.pi, np.pi, 0.1) # define the interpolation points\r\n    ori_x = np.array(data_c.index)\r\n    X = np.arange(0,len(ori_x),1)\r\n    new_x = np.arange(0,len(data.index),1)\r\n    print(new_x,data_c.values)\r\n    ipo3 = spi.splrep(X, data_c.values, k=3) # load the sample points and generate the spline parameters\r\n    # ipo3 = np.array(ipo3)\r\n    # print(ipo3)\r\n    iy3 = spi.splev(new_x, ipo3)\r\n    ax2 = fig.add_subplot(3, 1, 2)\r\n    plt.scatter(data_c.index, data_c, color='k', marker='.', alpha=0.3)\r\n    plt.scatter(error.index, error, color='r', marker='.', alpha=0.5)\r\n\r\n    ax3 = fig.add_subplot(3, 1, 3)\r\n    plt.scatter(data.index, iy3, color='b', marker='.', alpha=0.3)\r\n\r\n    plt.grid()\r\n    plt.show()\r\n    # chart rendering\r\n# use a boxplot to inspect the data distribution\r\n# use the inner fences as the cutoff\r\ndef method_2(data,data_water):\r\n    data=data['displacement']\r\n    fig = plt.figure(figsize=(10, 6))\r\n    ax1 = fig.add_subplot(2, 1, 1)\r\n    color = dict(boxes='DarkGreen', whiskers='DarkOrange', medians='DarkBlue', caps='Gray')\r\n    data.plot.box(vert=False, grid=True, color=color, ax=ax1)\r\n\r\n    s = data.describe()\r\n    print(s)\r\n    print('------')\r\n    # basic statistics\r\n\r\n    q1 = s['25%']\r\n    q3 = s['75%']\r\n    iqr = q3 - q1\r\n    mi = q1 - 1.5 * iqr\r\n    ma = q3 + 1.5 * iqr\r\n\r\n    ax2 = fig.add_subplot(2, 1, 2)\r\n    error = data[(data < mi) | (data > ma)]\r\n    data_c = data[(data >= mi) & (data <= ma)]\r\n\r\n    plt.scatter(data_c.index, data_c, color='k', alpha=0.3)\r\n    # plt.scatter(error.index, error, color='r', marker='.', alpha=0.5)\r\n    plt.scatter(data_water.index, data_water, color='b', alpha=0.5)\r\n\r\n    # plt.xlim([-10, 10010])\r\n    # 
plt.grid()\r\n\r\n\r\ndir_path='../JCK08\\\\'\r\n\r\ndir=os.listdir(dir_path)\r\nnum=len(dir)\r\nprint(num)\r\nmpl.rcParams[\"font.sans-serif\"] = [\"SimHei\"]\r\n\r\nfor name in dir[0:num-1]:\r\n    file_name=dir_path+name\r\n    print(file_name)\r\n    if name =='date40.xlsx' :\r\n        continue\r\n    data=pd.read_excel(file_name,index_col=0)\r\n    # → p(|x - μ| > 3σ) ≤ 0.003\r\n    method_1(data)\r\n    # boxplot analysis\r\n    # method_2(data,data_water)\r\nplt.show()","repo_name":"cnn2416/LSTM-Prophet","sub_path":"3σ.py","file_name":"3σ.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24861157546","text":"#! /usr/bin/python\n# encoding:utf-8\n\n# import the Tornado modules\nimport tornado.ioloop\nimport tornado.httpserver\nimport tornado.web\nimport tornado.options\nimport RPi.GPIO as GPIO\nimport serial #import serial module\ntry:\n    ser = serial.Serial('/dev/ttyACM1', 9600,timeout=1) #open named port at 9600, 1s timeout\nexcept:\n    ser = serial.Serial('/dev/ttyACM0', 9600,timeout=1)\n\nfrom tornado.options import define, options\n\ndefine(\"port\", type=int, default=12345, help=\"run on the given port\")\n\ndef curtain_close():\n    global ser\n    ser.write(str.encode('a'))#write a string to the port\n    return 0\n\ndef curtain_open():\n    global ser\n    ser.write(str.encode('b'))#write a string to the port\n    return 0\n\nclass IndexHandler(tornado.web.RequestHandler):\n    def get(self):\n        status = 'Choose your operation'\n        self.render('index.html', status=status)\n\nclass CurtainOpenHandler(tornado.web.RequestHandler):\n    def get(self):\n        curtain_open()\n        status = 'Curtain has been opened!'\n        self.render('index.html', status=status)\n\nclass CurtainCloseHandler(tornado.web.RequestHandler):\n    def get(self):\n        curtain_close()\n        status = 'Curtain is now closed!'\n        self.render('index.html', status=status)\n\nclass ImageHandler(tornado.web.StaticFileHandler):\n    def set_extra_headers(self, path):\n        self.set_header(\"Cache-control\", \"no-cache\")\n\n# urls = [(r\"/\", IndexHandler),(r\"/open\", CurtainOpenHandler),(r\"/close\", CurtainCloseHandler),(r\"/(pic.png)\", tornado.web.StaticFileHandler, {'path':'./'})]\nsettings = {\"debug\": True,}\nurls = [(r\"/\", IndexHandler),(r\"/open\", CurtainOpenHandler),(r\"/close\", CurtainCloseHandler),(r\"/(cap.jpeg)\", ImageHandler, {'path':'./'}),]\n\ndef web_server():\n    tornado.options.parse_command_line()\n    app = tornado.web.Application(urls, **settings)\n    app.listen(options.port)\n    tornado.ioloop.IOLoop.current().start()\n\n\nif __name__ == \"__main__\":\n    web_server()\n\n","repo_name":"HugoFishx/SmartCurtain","sub_path":"WebServer.py","file_name":"WebServer.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9126519025","text":"import argparse\n\nimport pandas as pd\npd.set_option( 'display.max_columns', 500 )\npd.set_option( 'display.width', 1000 )\n\nimport sys\nsys.path.append( '../util' )\nimport util\nimport normalize\n\nADDR = util.NORMALIZED_ADDRESS\nSTREET_NUMBER = util.NORMALIZED_STREET_NUMBER\nSTREET_NAME = util.NORMALIZED_STREET_NAME\nOCCUPANCY = util.NORMALIZED_OCCUPANCY\nADDITIONAL = util.NORMALIZED_ADDITIONAL_INFO\n\n# Main program\nif __name__ == '__main__':\n\n    # Retrieve and validate arguments\n    parser = argparse.ArgumentParser( description='Generate Building Permits table' )\n    parser.add_argument( '-m', dest='master_filename', help='Master database filename' )\n    parser.add_argument( '-p', 
dest='permit_type', help='Permit type fragment in the raw table name' )\n args = parser.parse_args()\n\n # Open the master database\n conn, cur, engine = util.open_database( args.master_filename, False )\n\n if args.permit_type is not None:\n suffix = '_' + args.permit_type\n else:\n suffix = ''\n\n # Retrieve table from database\n df_left = pd.read_sql_table( 'RawBuildingPermits' + suffix, engine, index_col=util.ID, parse_dates=True )\n\n # Clean up before processing\n df_left = df_left.drop_duplicates( subset=[util.PERMIT_NUMBER], keep='last' )\n\n # Normalize addresses. Use result_type='expand' to load multiple columns!\n df_left[ADDR] = df_left[util.ADDRESS]\n df_left[[ADDR,STREET_NUMBER,STREET_NAME,OCCUPANCY,ADDITIONAL]] = df_left.apply( lambda row: normalize.normalize_address( row, ADDR, city='LAWRENCE', return_parts=True ), axis=1, result_type='expand' )\n\n # Merge left dataframe with assessment data\n table_name = 'BuildingPermits_L' + suffix\n df_result = util.merge_with_assessment_data( table_name, df_left, sort_by=[util.PERMIT_NUMBER], drop_subset=[util.PERMIT_NUMBER], engine=engine )\n\n # Create table in database\n util.create_table( table_name, conn, cur, df=df_result )\n\n util.report_elapsed_time()\n","repo_name":"navkal/el","sub_path":"populators/lawrence_building_permits.py","file_name":"lawrence_building_permits.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34482276200","text":"import os\nfrom boto.s3.connection import S3Connection\nimport pickle\nimport spacy\nimport numpy as np\nimport warnings\nimport Comment\nimport dill\n\ndef add_to_cache(filepath, key):\n \"\"\"\n Saves a local file to S3.\n filepath is the full local path to the file.\n key is the S3 key to use in ds_cache bucket\n \"\"\"\n if is_s3_cache_available():\n print(\"saving to s3\")\n s3_key(key, new=True).set_contents_from_filename(filepath)\n \ndef write_obj_to_cache(obj, filepath, key, use_s3=True):\n \"\"\"\n Writes a python object to a file, and also stores that file in S3.\n filepath is the full local path to the file.\n key is the S3 key to use in ds_cache bucket\n \"\"\"\n pickle.dump(obj, open(filepath, \"wb\"))\n add_to_cache(filepath, key)\n\ndef is_s3_cache_available():\n \"\"\"\n Return True if a connection can be made to S3 in the current environment\n \"\"\"\n try:\n S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n return True\n except:\n print(\"WARNING: Unable to connect to s3\")\n return False\ndef s3_key(key, new=False):\n \"\"\"\n key is the S3 key in the ds_cache bucket. This function returns a reference\n to the boto.s3.Key object corresponding to the key parameter.\n If new=True, create a new key. Otherwise return an existing key.\n If the key doesn't exist, return None\n \"\"\"\n s3 = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n cache_bucket = s3.get_bucket('ds_cache')\n if new:\n return cache_bucket.new_key(key)\n return cache_bucket.get_key(key)\n\ndef load_cache(filepath, key):\n \"\"\"\n Loads file into local cache and returns the path. 
Returns None if the file\n is not available.\n filepath is the full local path to the file.\n key is the S3 key to use in ds_cache bucket\n \"\"\"\n #if os.path.exists(filepath):\n # print(\"file exists in cache\")\n # return filepath\n if is_s3_cache_available():\n if s3_key(key) is not None:\n print(\"transferring from s3\")\n s3_key(key).get_contents_to_filename(filepath)\n return filepath\n return None\n\ndef read_obj_from_cache(filepath, key):\n \"\"\"\n Reads object from local cache. Returns None if the file\n is not available.\n filepath is the full local path to the file.\n key is the S3 key to use in ds_cache bucket\n \"\"\"\n in_cache = load_cache(filepath, key)\n if in_cache:\n return pickle.load(open(in_cache, \"rb\"))\n return None\n\n\nAWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\n\nclass Model(object):\n\tdef __init__(self):\n\t\tself.key = None\n\t\tself.path = None\n\t\tself.container = None\n\t\tself.ID = None\n\t\tself.model = None\n\t\tself.type = None\n\t\tself.package_version = None\n\t\tself.preprocessing_dag = None\n\t\tself.comments = None\n\t\tself.is_loaded = False\n\n\tdef load(self, ID):\n\t\tif ID == 'sentiment':\n\t\t\tself.key = s3_key('{}-model'.format(ID))\n\t\t\tself.path = '{}-model.pkl'.format(ID)\n\t\t\tstorage_object = read_obj_from_cache(self.path, self.key)\n\t\t\tself.__package_old__(storage_object)\n\t\t\tdel storage_object\n\t\t\t\n\t\telse:\n\t\t\twarnings.warn(\"Model not found\")\n\t\t\treturn None\n\n\tdef __package_old__(self, storage_object):\n\t\tself.ID = storage_object.ID\n\t\tself.comments = storage_object.comments\n\t\tself.model = storage_object.model\n\t\tself.type = storage_object.type\n\t\tself.package_version = storage_object.package_version\n\t\tself.preprocessing_dag = storage_object.preprocessing_dag\n\t\tself.key = storage_object.key\n\t\tself.path = storage_object.path\n\t\tself.is_loaded = True\t\t\n\n\tdef package_new(self, ID, model, type, version, dag):\n\t\tself.ID = ID\n\t\tself.comments = Comment.Thread()\n\t\tself.model = model\n\t\tself.type = type\n\t\tself.package_version = version\n\t\tself.preprocessing_dag = dag\n\t\tself.key = s3_key('{}-model'.format(ID), new=True)\n\t\tself.path = '{}-model.pkl'.format(ID)\n\t\tself.is_loaded = True\n\n\tdef predict(self, x):\n\t\tif self.is_loaded:\n\t\t\tprocessor = self.preprocessing_dag()\n\t\t\tif hasattr(x, '__iter__'):\n\t\t\t\tresults = np.array(map(lambda t: self.model.predict_proba(processor(t))[0],x))\n\t\t\telse:\n\t\t\t\tresults = np.array(self.model.predict_proba(processor(x))[0])\n\t\t\treturn results\n\t\telse:\n\t\t\traise ValueError(\"Model not loaded\")\n\n\n\tdef save(self):\n\t\tif self.ID is None:\n\t\t\traise ValueError(\"You need to load or package a model\")\n\t\telse:\n\t\t\twrite_obj_to_cache(self, self.path, self.key)","repo_name":"pramitchoudhary/Experiments","sub_path":"notebook_gallery/other_experiments/explore-models/modelinterpretation/lime/interpret-your-sentiment-model/ds/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"16112422125","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport os\n\n### Mode-Adjuster ###\n\nfilemode = 1 # 1 for whThrower6 folder, 0 for one file\nsavemode = \"csv\" # \"plot\" for saving plots, \"csv\" for saving csvs\n\n######\n\n\ncolumn_x = 
'accelerometerAccelerationX(G)'\ncolumn_y = 'accelerometerAccelerationY(G)'\ncolumn_z = 'accelerometerAccelerationZ(G)'\ntimestamp = 'accelerometerTimestamp_sinceReboot(s)'\ndirPath = \"\"\ncount = 1\n\n\ndef findFile(filemode):\n    x = 1\n    while x == 1:\n        global dirPath\n        folder = input('Which folder should be used? ')\n        dirPath = \"../data/\" + folder + \"/csv/\"\n        if not os.path.isdir(dirPath):\n            print(\"Folder does not exist\")\n        else:\n            x = 0\n    if filemode == 0:\n        file = input('Which file should be used? ')\n        file_list = file\n        # files = dirPath + \"/csv/\" + file\n    else:\n        file_list = os.listdir(dirPath)\n        # files = [dirPath+ \"/csv/\" + file for file in os.listdir(dirPath)]\n        # print(files)\n    # print(files)\n    return file_list\n\n\ndef readCsv(file):\n    # print(file)\n    print(dirPath + file)\n    if os.path.isfile(dirPath + file):\n        df = pd.read_csv(dirPath + file)\n        # df.info()\n        x = 0\n        print(\"----\" + file + \"----\")\n    else:\n        print(\"Invalid file name\")\n    return df\n\n\ndef findThrow(df_raw):\n    df_raw[column_x] = df_raw[column_x] * 9.81\n    df_raw[column_y] = df_raw[column_y] * 9.81\n    df_raw[column_z] = df_raw[column_z] * 9.81\n    clap = df_raw[df_raw[column_z].abs().gt(30)].index[0]\n    dfThrow = df_raw[clap + 50:]\n    throwStart = dfThrow[dfThrow[column_z].abs().gt(30)].index[0] - 10\n    throwEnd = throwStart + 60\n    df = df_raw[throwStart:throwEnd]\n    return df\n\n\ndef genPlot(df, file):\n    modus = \"Acceleration (throw)\"\n    plt.title(modus + \": \" + file)\n    plt.plot(df[timestamp], df[column_x], label='x')\n    plt.plot(df[timestamp], df[column_y], label='y')\n    plt.plot(df[timestamp], df[column_z], label='z')\n    plt.legend()\n\n\ndef saveCsv(df, file):\n    global count\n    csvDirPath = dirPath[:-5] + \"/throwCsv/\"\n    if not os.path.isdir(csvDirPath):\n        os.mkdir(csvDirPath)\n        print(\"csvThrow folder created!\")\n    csvPath = csvDirPath + file[:-4] + \"_\" + \"Beschleunigung_(Wurf)\" + \".csv\"\n    df.to_csv(csvPath)\n    if os.path.isfile(csvPath):\n        print(str(count) + \": CSV saved successfully!\")\n        count += 1\n\n\ndef savePng(file):\n    global count\n    plotDirPath = dirPath[:-5] + \"/throwPlot/\"\n    if not os.path.isdir(plotDirPath):\n        os.mkdir(plotDirPath)\n        print(\"Plot folder created: \" + plotDirPath)\n    plotPath = plotDirPath + file[:-4] + \"_\" + \"Beschleunigung\" + \".png\"\n    plt.savefig(plotPath)\n    if os.path.isfile(plotPath):\n        print(str(count) + \": Plot saved successfully!\")\n        count += 1\n    plt.close()\n\n\ndef main():\n    if filemode == 0:\n        try:\n            if savemode == \"csv\":\n                file = findFile(filemode)\n                saveCsv(findThrow(readCsv(file)), file)\n            elif savemode == \"plot\":\n                file = findFile(filemode)\n                genPlot(findThrow(readCsv(file)), file)\n                savePng(file)\n                plt.close()\n        except Exception as e:\n            print (e)\n\n    elif filemode == 1:\n        files = findFile(filemode)\n        if savemode == \"csv\":\n            for file in files:\n                try:\n                    saveCsv(findThrow(readCsv(file)), file)\n                except Exception as e:\n                    print(e)\n        elif savemode == \"plot\":\n            for file in files:\n                try:\n                    genPlot(findThrow(readCsv(file)), file)\n                    savePng(file)\n                    plt.close()\n                except Exception as e:\n                    print(e)\n\n\nmain()\n","repo_name":"FionaLys/MotionAnalytica","sub_path":"MotionAnalytica/Data Preprocessing/extract_throw_from_raw_data.py","file_name":"extract_throw_from_raw_data.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72645651849","text":"import sys, re, traceback\n\n# pylint: disable=E0611\nfrom pytest 
import raises, xfail\n\nfrom multiconf import mc_config, ConfigItem, RepeatableConfigItem, ConfigException\n\nfrom multiconf.decorators import named_as, nested_repeatables\nfrom multiconf.envs import EnvFactory\n\nfrom .utils.utils import local_func, py37_no_exc_comma, config_error, next_line_num, replace_ids\n\n\ndef ce(line_num, *lines):\n return config_error(__file__, line_num, *lines)\n\n\nef = EnvFactory()\npp = ef.Env('pp')\nprod = ef.Env('prod')\n\n\ndef test_attribute_overrides_property_method_config_item_ok():\n @named_as('someitem')\n class Nested(ConfigItem):\n @property\n def m(self):\n return 1\n\n @mc_config(ef, load_now=True)\n def config0(_):\n with Nested() as nn:\n nn.setattr('m', default=7, mc_overwrite_property=True)\n\n cr = config0(prod)\n assert cr.someitem.m == 7\n\n @mc_config(ef, load_now=True)\n def config1(_):\n with Nested() as nn:\n nn.setattr('m', prod=7, mc_overwrite_property=True)\n\n cr = config1(prod)\n assert cr.someitem.m == 7\n\n @mc_config(ef, load_now=True)\n def config2(_):\n with Nested() as nn:\n nn.setattr('m', pp=7, mc_overwrite_property=True)\n\n cr = config2(prod)\n assert cr.someitem.m == 1\n\n\ndef test_attribute_overrides_property_method_repeatable_config_item_ok():\n @named_as('someitems')\n class Rep(RepeatableConfigItem):\n @property\n def m(self):\n return 1\n\n @named_as('root')\n @nested_repeatables('someitems')\n class Root(ConfigItem):\n pass\n\n @mc_config(ef, load_now=True)\n def config0(_):\n with Root():\n with Rep('r1') as rr:\n rr.setattr('m', default=7, mc_overwrite_property=True)\n Rep('r2')\n\n cr = config0(prod).root\n assert cr.someitems['r1'].m == 7\n assert cr.someitems['r2'].m == 1\n\n @mc_config(ef, load_now=True)\n def config1(_):\n with Root():\n with Rep('r1') as rr:\n rr.setattr('m', prod=7, mc_overwrite_property=True)\n Rep('r2')\n\n cr = config1(prod).root\n assert cr.someitems['r1'].m == 7\n assert cr.someitems['r2'].m == 1\n\n @mc_config(ef, load_now=True)\n def config2(_):\n with Root():\n with Rep('r1') as rr:\n rr.setattr('m', pp=7, mc_overwrite_property=True)\n Rep('r2')\n\n cr = config2(prod).root\n assert cr.someitems['r1'].m == 1\n assert cr.someitems['r2'].m == 1\n\n\ndef test_attribute_overrides_property_inherited_method():\n @named_as('someitem')\n class NestedBase(ConfigItem):\n @property\n def m(self):\n return 1\n\n class Nested(NestedBase):\n pass\n\n @mc_config(ef, load_now=True)\n def config0(_):\n with Nested() as nn:\n nn.setattr('m', default=7, mc_overwrite_property=True)\n\n cr = config0(prod)\n assert cr.someitem.m == 7\n\n @mc_config(ef, load_now=True)\n def config1(_):\n with Nested() as nn:\n nn.setattr('m', prod=7, mc_overwrite_property=True)\n\n cr = config1(prod)\n assert cr.someitem.m == 7\n\n @mc_config(ef, load_now=True)\n def config2(_):\n with Nested() as nn:\n nn.setattr('m', pp=7, mc_overwrite_property=True)\n\n cr = config2(prod)\n assert cr.someitem.m == 1\n\n\ndef test_attribute_overrides_property_method_not_existing(capsys):\n errorline = [None]\n\n @named_as('someitem')\n class Nested(ConfigItem):\n pass\n\n with raises(ConfigException) as exinfo:\n @mc_config(ef, load_now=True)\n def config(_):\n with Nested() as nn:\n errorline[0] = next_line_num()\n nn.setattr('m', default=7, mc_overwrite_property=True)\n\n sout, serr = capsys.readouterr()\n assert sout == ''\n assert serr == ce(errorline[0], \"'mc_overwrite_property' is True but no property named 'm' exists.\")\n\n\ndef test_attribute_overrides_property_method_is_regular_method(capsys):\n errorline = [None]\n\n 
@named_as('someitem')\n class Nested(ConfigItem):\n def m(self):\n return 2\n\n with raises(ConfigException) as exinfo:\n @mc_config(ef, load_now=True)\n def config(_):\n with Nested() as nn:\n errorline[0] = next_line_num()\n nn.setattr('m', default=7, mc_overwrite_property=True)\n\n _sout, serr = capsys.readouterr()\n msg = re.sub(r\"m at [^>]*>\", \"m at 1234>\", str(serr))\n expected = \"'mc_overwrite_property' specified but existing attribute 'm' with value '' is not a @property.\" % \\\n dict(local_func=local_func())\n assert msg == ce(errorline[0], expected)\n\n\ndef test_setattr_replace_property_in_with_not_allowed(capsys):\n errorline = [None]\n\n @named_as('someitem')\n class Nested(ConfigItem):\n def __init__(self):\n super().__init__()\n\n @property\n def m(self):\n return 2\n\n with raises(ConfigException) as exinfo:\n @mc_config(ef, load_now=True)\n def config(_):\n with Nested() as nn:\n errorline[0] = next_line_num()\n nn.setattr('m', default=7)\n\n _sout, serr = capsys.readouterr()\n exp = \"The attribute 'm' clashes with a @property or method and 'mc_overwrite_property' is False.\"\n assert serr == ce(errorline[0], exp)\n\n\ndef test_assigment_replace_property_in_init_not_allowed(capsys):\n errorline = [None]\n\n @named_as('someitem')\n class Nested(ConfigItem):\n def __init__(self, m=None):\n super().__init__()\n errorline[0] = next_line_num()\n self.m = m\n\n @property\n def m(self):\n return 2\n\n with raises(ConfigException) as exinfo:\n @mc_config(ef, load_now=True)\n def config(_):\n Nested(m=7)\n\n _sout, serr = capsys.readouterr()\n print(serr)\n exp = \"The attribute 'm' clashes with a @property or method. Use item.setattr with mc_overwrite_property=True if overwrite intended.\"\n assert serr == ce(errorline[0], exp)\n\n\ndef test_assigment_replace_property_in_with_not_allowed(capsys):\n errorline = [None]\n\n @named_as('someitem')\n class Nested(ConfigItem):\n @property\n def mm(self):\n return 1\n\n with raises(Exception) as exinfo:\n @mc_config(ef, load_now=True)\n def config(_):\n with Nested() as nn:\n errorline[0] = next_line_num()\n nn.mm = 7\n\n _sout, serr = capsys.readouterr()\n exp = \"The attribute 'mm' clashes with a @property or method. Use item.setattr with mc_overwrite_property=True if overwrite intended.\"\n assert serr == ce(errorline[0], exp)\n\n\ndef test_assigment_replace_mc_property_wrapper_not_allowed(capsys):\n errorline = [None]\n\n @named_as('someitem')\n class Nested(ConfigItem):\n @property\n def mm(self):\n return 1\n\n with raises(Exception) as exinfo:\n @mc_config(ef, load_now=True)\n def config2(_):\n with Nested() as nn:\n nn.setattr('mm', prod=3, mc_overwrite_property=True)\n errorline[0] = next_line_num()\n nn.mm = 7\n\n _sout, serr = capsys.readouterr()\n exp = \"The attribute 'mm' clashes with a @property or method. 
Use item.setattr with mc_overwrite_property=True if overwrite intended.\"\n assert serr == ce(errorline[0], exp)\n xfail(\"Not an ideal error message\")\n\n\ndef test_replace_mc_property_wrapper_not_allowed(capsys):\n errorline = [None]\n\n @named_as('someitem')\n class Nested(ConfigItem):\n @property\n def mm(self):\n return 1\n\n with raises(Exception) as exinfo:\n @mc_config(ef, load_now=True)\n def config2(_):\n with Nested() as nn:\n nn.setattr('mm', prod=3, mc_overwrite_property=True)\n errorline[0] = next_line_num()\n nn.setattr('mm', prod=7, mc_overwrite_property=True)\n\n _sout, serr = capsys.readouterr()\n exp = \"The attribute 'mm' is already fully defined.\"\n assert serr == ce(errorline[0], exp)\n\n\n_attribute_overrides_failing_property_method_exp = \"\"\"{\n \"__class__\": \"NestedBadM #as: 'someitem', id: 0000\",\n \"env\": {\n \"__class__\": \"Env\",\n \"name\": \"prod\"\n },\n \"m #no value for Env('prod')\": true,\n \"m #json_error trying to handle property method\": \"Exception('bad property method'%(comma)s)\"\n}, object of type: has no attribute 'm'.\n\"\"\".strip()\n\ndef test_attribute_overrides_failing_property_method():\n errorline = [None]\n\n @named_as('someitem')\n class NestedBadM(ConfigItem):\n @property\n def m(self):\n errorline[0] = next_line_num()\n raise Exception(\"bad property method\")\n\n @mc_config(ef)\n def config0(_):\n with NestedBadM() as nn:\n nn.setattr('m', prod=7, mc_overwrite_property=True)\n\n cr = config0.load(validate_properties=False)(prod)\n assert cr.someitem.m == 7\n\n @mc_config(ef)\n def config1(_):\n with NestedBadM() as nn:\n nn.setattr('m', pp=7, mc_overwrite_property=True)\n\n cr = config1.load(validate_properties=False)(prod)\n with raises(AttributeError) as exinfo:\n print(cr.someitem.m)\n\n origin_line_exp = 'raise Exception(\"bad property method\")'\n\n # TODO\n # print('XXX __context__', dir(exinfo.value.__context__))\n # print('XXX __cause__', dir(exinfo.value.__cause__))\n\n # ctx = exinfo.value.__context__\n # while True:\n # if not ctx.__context__:\n # break\n # ctx = ctx.__context__\n # print('ctx:', ctx.__traceback__)\n\n # tb = traceback.extract_tb(ctx.__traceback__)\n # for origin in tb:\n # print(origin)\n # origin = tb[-1]\n # filename, lineno, function_name, line = origin\n # assert filename == __file__\n # assert lineno == errorline[0]\n # assert function_name == 'm'\n # assert line == origin_line_exp\n\n exp = _attribute_overrides_failing_property_method_exp % dict(local_func=local_func(), comma=py37_no_exc_comma)\n exp += \" Attribute 'm' is defined as a multiconf attribute and as a @property method but value is undefined for Env('prod') and @property method call failed with: Exception('bad property method'{comma})\".format(comma=py37_no_exc_comma)\n\n print('exp:', exp)\n got = replace_ids(str(exinfo.value), named_as=False)\n print('got:', got)\n assert exp in got\n\n xfail(\"TODO: improve message, improve json\")\n\n\ndef test_attribute_overrides_property_method_raising_attribute_error():\n @named_as('someitem')\n class Nested(ConfigItem):\n @property\n def m(self):\n \"\"\"This raises AttributeError, a common scenario when calling a @property during config load\"\"\"\n print(\"test_attribute_overrides_property_method_raising_attribute_error, @property m, raises AttributeError.\")\n return self.i_dont_have_this_attribute\n\n @mc_config(ef)\n def config1(_):\n with Nested() as nn:\n nn.setattr('m', prod=7, mc_overwrite_property=True)\n\n cr = config1.load(validate_properties=False)(prod)\n assert 
cr.someitem.m == 7\n\n    @mc_config(ef)\n    def config2(_):\n        with Nested() as nn:\n            nn.setattr('m', pp=7, mc_overwrite_property=True)\n\n    nn = config2.load(validate_properties=False)(prod).someitem\n    with raises(AttributeError) as exinfo:\n        mmm = nn.m\n        print(mmm)\n\n    ex_msg = str(exinfo.value)\n    print(ex_msg)\n    assert \"Attribute 'm' is defined as a multiconf attribute and as a @property method\" in ex_msg\n    assert \"value is undefined for Env('prod') and @property method call failed\" in ex_msg\n    assert \"\"\"AttributeError(\"'Nested' object has no attribute 'i_dont_have_this_attribute'\"%(comma)s)\"\"\" % dict(comma=py37_no_exc_comma) in ex_msg\n\n\ndef test_attribute_overrides_property_method_using_mc_set_unknown_repeated_env(capsys):\n    errorline = [None]\n\n    @named_as('someitem')\n    class Nested(ConfigItem):\n        @property\n        def m(self):\n            return 2\n\n    with raises(ConfigException) as exinfo:\n        @mc_config(ef, load_now=True)\n        def config(_):\n            with Nested() as nn:\n                errorline[0] = next_line_num()\n                nn.setattr('m', pp=7, mc_overwrite_property=True)\n                nn.setattr('m', pp=17, mc_set_unknown=True)\n\n    sout, serr = capsys.readouterr()\n    exp = \"Attempting to use 'mc_set_unknown' to overwrite a an existing @property 'm'.\"\n    assert exp in serr\n    assert not sout\n","repo_name":"lhupfeldt/multiconf","sub_path":"test/attribute_override_property_test.py","file_name":"attribute_override_property_test.py","file_ext":"py","file_size_in_byte":12624,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"15819637141","text":"# -*- coding: utf-8 -*-\nfrom kivy.lang import Builder\nfrom kivy.properties import BoundedNumericProperty, ReferenceListProperty, ListProperty,BooleanProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivymd.elevationbehavior import ElevationBehavior\nfrom kivymd.theming import ThemableBehavior\nfrom kivy.metrics import dp\nfrom kivy.uix.widget import Widget\n\nBuilder.load_string('''\n<MDCard>:\n    canvas:\n        Color:\n            rgba: self.background_color\n        RoundedRectangle:\n            size: self.size\n            pos: self.pos\n            radius: [self.border_radius]\n        Color:\n            rgba: self.theme_cls.divider_color\n            a: self.border_color_a\n        Line:\n            rounded_rectangle: (self.pos[0],self.pos[1],self.size[0],self.size[1],self.border_radius) \n    background_color: self.theme_cls.bg_light\n    \n\n<MDSeparator>:\n    canvas:\n        Color:\n            rgba: self.theme_cls.divider_color\n        Rectangle:\n            size: self.size\n            pos: self.pos\n''')\n\n\nclass MDSeparator(ThemableBehavior, BoxLayout):\n    \"\"\" A separator line \"\"\"\n    def __init__(self, *args, **kwargs):\n        super(MDSeparator, self).__init__(*args, **kwargs)\n        self.on_orientation()\n    \n    def on_orientation(self,*args):\n        self.size_hint = (1, None) if self.orientation == 'horizontal' else (None, 1)\n        if self.orientation == 'horizontal':\n            self.height = dp(1)\n        else:\n            self.width = dp(1)\n\n\nclass MDCard(ThemableBehavior, ElevationBehavior, BoxLayout):\n    r = BoundedNumericProperty(1., min=0., max=1.)\n    g = BoundedNumericProperty(1., min=0., max=1.)\n    b = BoundedNumericProperty(1., min=0., max=1.)\n    a = BoundedNumericProperty(0., min=0., max=1.)\n    \n    border_radius = BoundedNumericProperty(dp(3),min=0)\n    border_color_a = BoundedNumericProperty(0, min=0., max=1.)\n    background_color = ReferenceListProperty(r, g, b, a)\n","repo_name":"Joelzeller/DigitalRaceDashSlim","sub_path":"kivymd/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"16"}
+{"seq_id":"70253666888","text":"#!/usr/bin/python3\n\"\"\"a module with a canUnlockAll function\"\"\"\n\n\ndef canUnlockAll(boxes):\n    \"\"\"determines whether all boxes can be unlocked\"\"\"\n    # Special case: If there are no boxes or only one box, it is always True.\n    if not boxes or len(boxes) == 1:\n        return True\n\n    n = len(boxes)\n    visited = set()\n    queue = [0] # Start with the first box (index 0).\n    visited.add(0)\n\n    while queue:\n        current_box = queue.pop(0)\n\n        for key in boxes[current_box]:\n            if key not in visited and key < n:\n                queue.append(key)\n                visited.add(key)\n\n    return len(visited) == n\n","repo_name":"Mmah-Zombo/alx-interview","sub_path":"0x01-lockboxes/0-lockboxes.py","file_name":"0-lockboxes.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39639708378","text":"\nn = 0\nc = -1\ntotal = n\n\nwhile n != 999:\n    n = int(input('Enter a number: [999] to stop.'))\n    c += 1\n    total += n\n\n# subtract the 999 sentinel that was added to the running total inside the loop\ntotal -= 999\nprint(f'You entered {c} numbers and their sum is {total}')\n","repo_name":"KaioVinicios/CeV_Python","sub_path":"Python/Mundo 1_2_3/des064.py","file_name":"des064.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37380460581","text":"class Angle:\n    def __init__(\n        self,\n        description: str,\n        organ1: str,\n        organ2: str,\n        organ3: str,\n        organ4: str,\n        angle1: str,\n        angle2: str,\n    ):\n        self.description = description\n        # points 1 and 2 form a straight line\n        self.organ1 = organ1\n        self.organ2 = organ2\n        # points 3 and 4 form a straight line\n        self.organ3 = organ3\n        self.organ4 = organ4\n        # the measured angle falls between angle1 and angle2\n        self.angle1 = angle1\n        self.angle2 = angle2\n","repo_name":"Mr-xiaobing/CMSGS","sub_path":"src/entity/angle.py","file_name":"angle.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"16"} +{"seq_id":"3194668216","text":"import timeit\r\n\r\n# sum-calculation function 1\r\ndef func1(nums):\r\n    total = 0\r\n    for i in nums:\r\n        total += i\r\n    return total\r\n\r\n\r\n# sum-calculation function 2\r\ndef func2(nums):\r\n    return sum(nums)\r\n\r\n\r\n# build a list of 1 through 10000\r\nnum_list = [i for i in range(1, 10001)]\r\n\r\n# Exercise 1\r\n# The programs below compute the same result with \"function 1\", \"function 2\",\r\n# and \"plain sum without a function\". Print the time each one takes using timeit.\r\n# Specify 10,000 executions for all of them.\r\nresult1 = timeit.timeit(\"func1(num_list)\", globals=globals(), number=10000)\r\nresult2 = timeit.timeit(\"func2(num_list)\", globals=globals(), number=10000)\r\nresult3 = timeit.timeit(\"sum(num_list)\", globals=globals(), number=10000)\r\n\r\nprint(func1(num_list))\r\nprint(func2(num_list))\r\nprint(sum(num_list))\r\nprint(\"----------------\")\r\n\r\n\r\n# print the measured results (elapsed seconds)\r\nprint(result1)\r\nprint(result2)\r\nprint(result3)\r\n","repo_name":"Masaru-DaL/School","sub_path":"term.3/PythonProgramming_c/2022.10.20/01_performance/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4151202629","text":"import os\nimport sys\nimport string\nimport pyrax\nimport pyrax.exceptions as exc\nimport time\n\nprint(\"Using credentials file: ~/.rackspace_cloud_credentials\")\ncred_file = os.path.expanduser(\"~/.rackspace_cloud_credentials\")\ntry:\n    pyrax.set_credential_file(cred_file)\nexcept exc.AuthenticationFailed:\n    print(\"Did you remember to replace the credential file with your actual \" +\n          \"username and 
api_key?\")\n\nif pyrax.identity.authenticated:\n    print(\"Successfully authenticated.\")\nelse:\n    print(\"Authentication failed. Exiting...\")\n    sys.exit(1)\n\n# variable to hold the count of how many servers will be built\nsrv_count = 3\n\nprint(\"You are about to create three 512MB Ubuntu 12.04 LTS Cloud Servers.\")\nprint(\"Please enter the servers' base name (i.e. type: web to create servers \" +\n      \"web1, web2, web3, ...)\")\nprint(\"Base name:\"),\ncs_base_name = raw_input()\n\nprint(\"Server names will be:\")\nfor i in range(1, srv_count + 1):\n    print(cs_base_name + str(i))\n\nprint(\"Proceed with creating server instances? y/n:\"),\nanswer = raw_input()\n\nif answer == \"y\":\n    # go\n    cs = pyrax.cloudservers\n    # grab the Ubuntu 12.04 LTS image\n    for img in cs.images.list():\n        if \"Ubuntu 12.04 LTS\" in img.name:\n            cs_image = img\n    # grab the 512MB flavor\n    for flv in cs.flavors.list():\n        if flv.ram == 512:\n            cs_flavor = flv\n    # create matrix to hold server information\n    server_matrix = []\n\n    for s in range(1, srv_count + 1):\n        current_name = cs_base_name + str(s)\n        print(\"Creating server: \" + current_name)\n        # print(\"Image:\" + cs_image.id\n        # print(\"Flavor:\" + cs_flavor.id\n        # Create server:\n        server = cs.servers.create(current_name, cs_image.id, cs_flavor.id)\n        # Add server information to matrix\n        server_matrix.append([str(server.id), str(server.name),\n                              str(server.adminPass), \"\"])\n    print(\"Servers are building...waiting to obtain IP information.\")\n\n    received_ips = False\n    count_done = 0\n    while not received_ips:\n        print(\"Not all IPs have been assigned. (\" + str(count_done) + \"/\" +\n              str(srv_count) + \") Sleeping for 30 seconds.\")\n        time.sleep(30)\n        received_ips = True\n        # get server list and populate server_matrix with IPs\n        for y in cs.servers.list():\n            index = 0\n            for s in server_matrix:\n                if y.id == server_matrix[index][0]:\n                    # print(i.networks\n                    for k, v in y.networks.iteritems():\n                        if k == \"public\":\n                            if len(v[0]) > 15:\n                                # print(v[1])\n                                server_matrix[index][3] = str(v[1])\n                            else:\n                                # print(v[0])\n                                server_matrix[index][3] = str(v[0])\n                    count_done += 1\n                index += 1\n        count_done = 0\n        for x in server_matrix:\n            # print(x)\n            if len(x[3]) < 1:\n                received_ips = False\n            else:\n                count_done += 1\n    print(\"\")\n    print(\"[' uuid ', ' name ',\" +\n          \" 'root password', 'IP address']\")\n    for x in server_matrix:\n        print(x)\nelse:\n    print(\"Aborting.\")\nprint(\"\")\nprint(\"Done.\")\nprint(\"\")\n","repo_name":"theneykov/rax-api-scripts","sub_path":"challenge1.py","file_name":"challenge1.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42251391531","text":"\r\n# import pandas as pd\r\n# import numpy as np\r\n# import matplotlib.pyplot as plt\r\n# import seaborn as sns\r\n\r\n# from urllib.request import urlopen\r\n# from bs4 import BeautifulSoup\r\n# url = \"https://pythonprogramming.net/beginner-python-programming-tutorials/\"\r\n# html = urlopen(url)\r\n# soup = BeautifulSoup(html, 'lxml')\r\n# type(soup)\r\n# title = soup.title\r\n# print(title)\r\n# hyper=soup.find_all('a')\r\n# print(hyper)\r\n\r\n\r\n# # # page = requests.get(url) # to extract page from website\r\n# # # html_code = page.content\r\n# # # soup = BeautifulSoup(html_code, 'html.parser') #Parse html code\r\n# # # texts = soup.findAll(text=True) #find all text\r\n# # # text_from_html = ' '.join(texts) #join all text\r\n\r\n\r\n#from pyexcel_xlsx import get_data\r\nfrom bs4 import 
BeautifulSoup\r\nimport requests\r\nimport pandas as pd\r\n\r\n# url='https://www.yahoo.co.in/search?source=h+data+onAAZ0CiAGMQ4dUDCAg&uact=5'\r\n#df = pd.read_excel(r\"C:/Users/SSS2015045/Desktop/ptest.xlsx\")\r\ndf = pd.read_excel(r\"C:\\Users\\SSS2016056\\Desktop\\ptest.xlsx\")\r\ndf_list = list(df['first_refer'])\r\n# print(df_list[0])\r\n# for i in df_list['first_refer']:\r\n#\r\n# output = []\r\n# output['first_refer[1]'] = output[first_refer][0]\r\n# print(output)\r\n\r\n\r\n# print(df_list)\r\n\r\n# list1 = list(df['first_refer'])\r\n# # for i in list\r\n#\r\n# # # col=df.iloc[0]\r\n# # print(col)\r\n# #print(df)\r\n#\r\n#print(df_list[0])\r\nurl = df_list[1]\r\n#\r\n#\r\nkeywords1 = ['facebook', 'linkedin', 'youtube']\r\nkeywords2 = ['google', 'bing', 'yahoo', 'gmail']\r\nkeywords3 = ['ads', 'amazon']\r\n\r\nfor l in df_list:\r\n\r\n for key in keywords1:\r\n if key in l:\r\n print('Organic Social')\r\n\r\n else:\r\n for key in keywords2:\r\n if key in l:\r\n print('Organic search')\r\n\r\n else:\r\n for key in keywords3:\r\n if key in l:\r\n print('paid marketing')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"vyjayanthi03/sample","sub_path":"vasu.py","file_name":"vasu.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7746734733","text":"import numpy as np\nimport math\n\n##\n##\n## function to take the input luminosity of a given band and return the \n## corresponding magnitude, for the appropriate bands and units as \n## listed below\n##\n## UNITS_... keywords tell it the units of L, whether solar L in the \n## band, bolometric L_sun (nuLnu/3.9d33), cgs (nuLnu/[erg/s])\n##\t\tdefault : bolometric L_sun\n##\n## BAND_... keywords tell it the relevant band, of UBVRIJHKugriz (SDSS \n##\t un-primed filters), or /BOLOMETRIC, or BAND_NUMBER uses the number of the \n## band below instead of making you explicitly name it\n##\t\tdefault : bolometric\n##\n## VEGA or AB keywords have it return either vega or ab magnitudes\n##\t\tdefault : VEGA for UBVRIJHK (Johnsons UBVRI, Cousins JHK), AB for ugriz \n##\n## L_NU or NU_L_NU tell it whether the input is in specific luminosity L_NU (e.g. \n## erg/s/Hz), or NU_L_NU (e.g. erg/s)\n##\t\tdefault : NU_L_NU\n##\n##\n##\n## Luminosity index legend\n## 0 = bolometric luminosity\n## 1 = Johnsons U\n## 2 = Johnsons B\n## 3 = Johnsons V\n## 4 = Johnsons R\n## 5 = Johnsons I\n## 6 = Cousins J\n## 7 = Cousins H\n## 8 = Cousins K\n## 9 = Sloan u\n## 10 = Sloan g\n## 11 = Sloan r\n## 12 = Sloan i\n## 13 = Sloan z\n##\n\ndef luminosity_to_magnitude( L, \\\n\tUNITS_SOLAR_BOL=0, UNITS_SOLAR_BAND=0, \\\n\tUNITS_CGS=0, \\\n\tNU_L_NU=0, L_NU=0, \\\n\tBAND_U=0,BAND_B=0,BAND_V=0,BAND_R=0,BAND_I=0, \\\n\tBAND_J=0,BAND_H=0,BAND_K=0,BAND_SDSS_u=0, \\\n\tBAND_SDSS_g=0,BAND_SDSS_r=0,BAND_SDSS_i=0, \\\n\tBAND_SDSS_z=0,BOLOMETRIC=0, BAND_NUMBER=0, \\\n\tVEGA=0, AB=0 , \\\n\tMAGNITUDE_TO_LUMINOSITY=0 ):\n\n N_BANDS = 14\n\n ## VEGA system\n ## from www.ucolick.org/~cnaw/sun.html\n ## following Fukugita et al. 
1995, PASP, 105, 945\n    mag_sun_vega = np.zeros(N_BANDS)\n    mag_sun_vega[0] = 4.74; ##bolometric from Allen's Astrophysical Quantities\n    mag_sun_vega[1] = 5.56; ##U [BESSEL]\n    mag_sun_vega[2] = 5.45; ##B [BESSEL]\n    mag_sun_vega[3] = 4.80; ##V [BESSEL]\n    mag_sun_vega[4] = 4.46; ##R [KPNO]\n    mag_sun_vega[5] = 4.10; ##I [KPNO]\n    mag_sun_vega[6] = 3.66; ##J [BESSEL]\n    mag_sun_vega[7] = 3.32; ##H [BESSEL]\n    mag_sun_vega[8] = 3.28; ##K [BESSEL]\n    mag_sun_vega[9] = 5.82; ##SDSS u [unprimed Vega]\n    mag_sun_vega[10] = 5.44; ##SDSS g [unprimed Vega]\n    mag_sun_vega[11] = 4.52; ##SDSS r [unprimed Vega]\n    mag_sun_vega[12] = 4.11; ##SDSS i [unprimed Vega]\n    mag_sun_vega[13] = 3.89; ##SDSS z [unprimed Vega]\n\n    ## AB system\n    mag_sun_ab = np.zeros(N_BANDS)\n    mag_sun_ab[0] = 4.74; \n    mag_sun_ab[1] = 6.34; ##U [BESSEL]\n    mag_sun_ab[2] = 5.33; ##B [BESSEL]\n    mag_sun_ab[3] = 4.81; ##V [BESSEL]\n    mag_sun_ab[4] = 4.65; ##R [KPNO]\n    mag_sun_ab[5] = 4.55; ##I [KPNO]\n    mag_sun_ab[6] = 4.57; ##J [BESSEL]\n    mag_sun_ab[7] = 4.71; ##H [BESSEL]\n    mag_sun_ab[8] = 5.19; ##K [BESSEL]\n    mag_sun_ab[9] = 6.75; ##SDSS u [unprimed AB]\n    mag_sun_ab[10] = 5.33; ##SDSS g [unprimed AB]\n    mag_sun_ab[11] = 4.67; ##SDSS r [unprimed AB]\n    mag_sun_ab[12] = 4.48; ##SDSS i [unprimed AB]\n    mag_sun_ab[13] = 4.42; ##SDSS z [unprimed AB]\n\n    ## Effective wavelengths of the bands [in Angstroms], to compute nuLnu<->Lnu\n    ## UBVRIJHK from http://cassfos02.ucsd.edu/physics/ph162/mags.html\n    ## SDSS ugriz from http://www.sdss.org/dr4/instruments/imager/index.html#filters\n    lambda_eff = np.zeros(N_BANDS)\n    lambda_eff[0] = 1.0; ##bolometric, no nu\n    lambda_eff[1] = 3600.0; ##U\n    lambda_eff[2] = 4400.0; ##B\n    lambda_eff[3] = 5556.0; ##V\n    lambda_eff[4] = 6940.0; ##R\n    lambda_eff[5] = 8700.0; ##I\n    lambda_eff[6] = 12150.; ##J\n    lambda_eff[7] = 16540.; ##H\n    lambda_eff[8] = 21790.; ##K\n    lambda_eff[9] = 3551.; ##SDSS u\n    lambda_eff[10] = 4686.; ##SDSS g\n    lambda_eff[11] = 6165.; ##SDSS r\n    lambda_eff[12] = 7481.; ##SDSS i\n    lambda_eff[13] = 8931.; ##SDSS z\n\n    l_bol_sun = 3.9e33; ## bolometric solar in erg/s\n    c_light = 2.998e10; ## speed of light in cm/s\n    nu_eff = c_light/(lambda_eff * 1.0e-8); ## converts to nu_eff in Hz\n\n    i_BAND = 0; ## default to bolometric \n    if (BAND_NUMBER >= 1) : i_BAND=BAND_NUMBER ## any explicit band number, per the legend above\n    if (1 == BAND_U) : i_BAND=1\n    if (1 == BAND_B) : i_BAND=2\n    if (1 == BAND_V) : i_BAND=3\n    if (1 == BAND_R) : i_BAND=4\n    if (1 == BAND_I) : i_BAND=5\n    if (1 == BAND_J) : i_BAND=6\n    if (1 == BAND_H) : i_BAND=7\n    if (1 == BAND_K) : i_BAND=8\n    if (1 == BAND_SDSS_u) : i_BAND=9\n    if (1 == BAND_SDSS_g) : i_BAND=10\n    if (1 == BAND_SDSS_r) : i_BAND=11\n    if (1 == BAND_SDSS_i) : i_BAND=12\n    if (1 == BAND_SDSS_z) : i_BAND=13\n    if (1 == BOLOMETRIC) : i_BAND=0\n\n    ## default to Vega for bolometric & UBVRIJHK, and AB for ugriz\n    vega_key = 1\n    if ((i_BAND > 8) & (i_BAND <= 13)) : vega_key = 0\n    if (VEGA==1) : vega_key=1\n    if (AB==1) : vega_key=0\n    magnitude_zero_point = mag_sun_vega[i_BAND]\n    if (vega_key == 0) : magnitude_zero_point = mag_sun_ab[i_BAND]\n\n    ## use the AB magnitudes to convert to an actual L_nu of the sun in each band\n    lnu_sun_band = np.zeros(N_BANDS)\n    ten_pc = 10.0 * 3.086e18; ## 10 pc in cm\n    log_S_nu = -(mag_sun_ab + 48.6)/2.5;\t## zero point definition for ab magnitudes\n    S_nu = 10.**log_S_nu; ## get the S_nu at 10 pc which defines M_AB\n    lnu_sun_band = S_nu * (4.0*math.pi*ten_pc*ten_pc); ## multiply by distance modulus \n    nulnu_sun_band = lnu_sun_band * nu_eff; ## multiply by nu_eff to get nu*L_nu\n    ## correct the bolometric\n    
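## (lambda_eff[0] = 1 Angstrom above is only a placeholder, so the band-0 values just computed are dummies and are replaced here)\n    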
lnu_sun_band[0] = l_bol_sun;\n    nulnu_sun_band[0] = l_bol_sun;\n\n    ## check if we're reversing the routine to go magnitude to luminosity (instead of vice-versa)\n    if (MAGNITUDE_TO_LUMINOSITY==1) :\n        L_of_M = nulnu_sun_band[i_BAND] * 10.**(-0.4 * (L - magnitude_zero_point)) # here the input L is a magnitude\n        ## now convert to appropriate units\n        if (L_NU==1): L_of_M /= nu_eff[i_BAND];\n        if (UNITS_SOLAR_BOL==1): return L_of_M/l_bol_sun;\n        if (UNITS_SOLAR_BAND==1): \n            if (L_NU==1): \n                return L_of_M/lnu_sun_band[i_BAND];\n            else:\n                return L_of_M/nulnu_sun_band[i_BAND];\n        if (UNITS_CGS==1): return L_of_M;\n        return L_of_M/l_bol_sun;\n\n    ## alright, now have lnu of the sun in each band (the appropriate normalization\n    ## for either magnitude system), can compare with the input luminosity\n    nulnu_given = L;\n    if (1==NU_L_NU) : nulnu_given = L;\n    if (1==L_NU) : nulnu_given = nu_eff[i_BAND] * L;\n\n    ## default to assume in units of solar bolometric (if nu*L_nu): \n    l_in_solar_in_band = nulnu_given * (l_bol_sun/nulnu_sun_band[i_BAND]);\n    ## or L_nu(sun) in the band (if given L_nu):\n    if (1==L_NU) : l_in_solar_in_band = L;\n\n    ## convert to the appropriate units\n    if (UNITS_SOLAR_BAND==1) : l_in_solar_in_band = nulnu_given; ## given in solar in band\n    if (UNITS_SOLAR_BAND==1) and (L_NU==1) : l_in_solar_in_band = L;\n    if (UNITS_SOLAR_BOL) : l_in_solar_in_band = nulnu_given * (l_bol_sun/nulnu_sun_band[i_BAND]);\n    if (UNITS_CGS) : l_in_solar_in_band = nulnu_given / nulnu_sun_band[i_BAND];\n\n    return magnitude_zero_point - 2.5*np.log10(l_in_solar_in_band);\n\n\n\n## routine to return the solar absolute magnitude in each band that the colors code gives \ndef get_solar_mags():\n\ts_UBVRIJHK = np.zeros(8)\n\ts_UBVRIJHK[0] = 5.66; #U\n\ts_UBVRIJHK[1] = 5.47; #B\n\ts_UBVRIJHK[2] = 4.82; #V\n\ts_UBVRIJHK[3] = 4.28; #R\n\ts_UBVRIJHK[4] = 3.94; #I\n\ts_UBVRIJHK[5] = 3.64; #J ?\n\ts_UBVRIJHK[6] = 3.44; #H ?\n\ts_UBVRIJHK[7] = 3.33; #K\n\n\ts_ugrizJHK = np.zeros(8)\n\ts_ugrizJHK[0] = 6.2789; #u\n\ts_ugrizJHK[1] = 4.9489; #g\n\ts_ugrizJHK[2] = 4.44964; #r\n\ts_ugrizJHK[3] = 4.34644; #i\n\ts_ugrizJHK[4] = 4.3592; #z\n\ts_ugrizJHK[5] = 3.64; #J ?\n\ts_ugrizJHK[6] = 3.44; #H ?\n\ts_ugrizJHK[7] = 3.33; #K\n\n\tsolar_mags = np.zeros(14)\n\tsolar_mags[0] = 4.74; #bolometric\n\tsolar_mags[1] = s_UBVRIJHK[0]\n\tsolar_mags[2] = s_UBVRIJHK[1]\n\tsolar_mags[3] = s_UBVRIJHK[2]\n\tsolar_mags[4] = s_UBVRIJHK[3]\n\tsolar_mags[5] = s_UBVRIJHK[4]\n\tsolar_mags[6] = s_UBVRIJHK[5]\n\tsolar_mags[7] = s_UBVRIJHK[6]\n\tsolar_mags[8] = s_UBVRIJHK[7]\n\tsolar_mags[9] = s_ugrizJHK[0]\n\tsolar_mags[10] = s_ugrizJHK[1]\n\tsolar_mags[11] = s_ugrizJHK[2]\n\tsolar_mags[12] = s_ugrizJHK[3]\n\tsolar_mags[13] = s_ugrizJHK[4]\n\n\treturn solar_mags\n","repo_name":"TomWagg/cogsworth","sub_path":"FIRE/helpers/colors_sps/lum_mag_conversions.py","file_name":"lum_mag_conversions.py","file_ext":"py","file_size_in_byte":8200,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"}
+{"seq_id":"10849560864","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.route('/saludo')\ndef saludar():\n    return 'Mi dicionario de Slang Panameño! 
Elio Camarena'\n\n@app.route(\"/\")\ndef index():\n    titulo = \"dicionario\"\n    palabra = [\"xopa\",\"mopri\",\"parking\",\"yala vida\"]\n    significado = [\"saludo\",\"primo\",\"fiesta\",\"asombro\"]\n    return render_template(\"index.html\", titulo=titulo, palabra=palabra, significado=significado)\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"elioca08/JINJA","sub_path":"eliojinja.py","file_name":"eliojinja.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"3744592987","text":"from typing import List\n\n\ndef solution(progresses: List[int], speeds: List[int]) -> List[int]:\n    # pop(0) on a Python list shifts every remaining element and costs O(n),\n    # so reverse the lists and pop() from the end instead (pop() is O(1))\n    progresses = progresses[::-1]\n    speeds = speeds[::-1]\n    answer = []\n\n    day = 0\n    count = 0\n    while progresses:\n        print(f'day: {day}, count: {count}, progress: {progresses[-1] + speeds[-1] * day}, answer: {answer}')\n\n        # if the progress has reached 100%\n        if (progresses[-1] + speeds[-1] * day) >= 100:\n            count += 1\n            progresses.pop()\n            speeds.pop()\n        else:\n            day += 1\n            # if some features have already finished, release them as one batch\n            if count > 0:\n                answer.append(count)\n                count = 0\n\n    answer.append(count)\n\n    return answer\n\n\nif __name__ == \"__main__\":\n    # progresses = [95, 90, 99, 99, 80, 99]\n    # speeds = [1, 1, 1, 1, 1, 1]\n    progresses = [93, 30, 55]\n    speeds = [1, 30, 5]\n    print(solution(progresses, speeds))","repo_name":"enirobot/codingtest","sub_path":"python/기능개발.py","file_name":"기능개발.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8295145030","text":"import json\nimport time\nimport datetime\nimport requests\nimport pybybit\nimport numpy as np\nimport pandas as pd\nfrom config import get_config\nfrom sklearn.linear_model import LinearRegression, Ridge\n\nfrom utils import discord_Notify\n\n\ndef get_btc_ohlcv():\n    print(\"get btc ohlcv...\")\n    dfs = []\n    for k in range(4):\n        t = int((datetime.datetime.now() - datetime.timedelta(days = k * 365)).timestamp()) // 86400 * 86400\n        f = int((datetime.datetime.now() - datetime.timedelta(days = (k + 1) * 365)).timestamp()) // 86400 * 86400 \n        MEX_UDF_URL = 'https://www.bitmex.com/api/udf/history'\n        mex_param = {\n            'symbol':'XBTUSD',\n            'resolution': '1D',\n            'from':str(f),\n            'to':str(t)\n        }\n        while True:\n            try:\n                res_mex = requests.get(MEX_UDF_URL, mex_param)\n                res_mex.raise_for_status()\n                break\n            except Exception as e:\n                message = 'Get xbt ohlcv failed.:' + str(e)\n                discord_Notify(message)\n                time.sleep(2)\n                continue\n        ohlcv = res_mex.json()\n        data = pd.DataFrame(index=ohlcv['t'], columns=[])\n        data['open'] = pd.DataFrame(ohlcv['o'], index=ohlcv['t'])\n        data['high'] = pd.DataFrame(ohlcv['h'], index=ohlcv['t'])\n        data['low'] = pd.DataFrame(ohlcv['l'], index=ohlcv['t'])\n        data['close'] = pd.DataFrame(ohlcv['c'], index=ohlcv['t'])\n        data['volume'] = pd.DataFrame(ohlcv['v'], index=ohlcv['t'])\n        data = data.reset_index().rename(columns = {\"index\" : \"time\"})\n        data[\"time\"] = data[\"time\"].apply(datetime.datetime.fromtimestamp)\n        dfs.append(data)\n    data = pd.concat(dfs, axis = 0)\n    data = data.sort_values(by = \"time\").reset_index(drop = True)\n    data = data[:-1]  # drop the last row (the current, unfinished day)\n    return data\n\ndef get_depth():\n    print(\"get depth...\")\n    deps = []\n    for i, bp in enumerate([5, 10, 20, 30, 50, 90]):\n        dep_url = 
f'http://data.bitcoinity.org/export_data.csv?bp={bp}&bu=c&currency=USD&data_type=bidask_sum&exchange=bitmex&timespan=all'\n        res_dep = requests.get(dep_url)\n\n        with open('tmp.csv', 'wb') as f:\n            f.write(res_dep.content)\n        dep = pd.read_csv('tmp.csv')\n        dep[\"time\"] = dep[\"Time\"].apply(lambda x : datetime.datetime.strptime(x[:-4], '%Y-%m-%d %H:%M:%S'))\n        dep = dep.rename(columns = {\"asks\" : f\"asks{bp}\", \"bids\" : f\"bids{bp}\"})\n        if i == 0:\n            deps.append(dep)\n        else:\n            deps.append(dep.drop([\"time\", \"Time\"], axis = 1))\n\n    dep = pd.concat(deps, axis = 1)\n    return dep\n\nfrom bitmex import bitmex\ndef get_eth():\n    print(\"get eth...\")\n    config = get_config()\n    bitmex_client = bitmex(test=False, api_key=config[\"bitmex_key\"], api_secret=config[\"bitmex_secret\"])\n    symbol = \"ETH\"\n    t = datetime.datetime.strptime(\"2018-8\", \"%Y-%m\") ## no data is available before this date\n    df_eth = pd.DataFrame(bitmex_client.Trade.Trade_getBucketed(symbol=symbol, binSize=\"1d\", count=1000, reverse=False, startTime = t).result()[0])\n    df_eth = df_eth[[\"timestamp\", \"open\", \"high\", \"low\", \"close\", \"trades\", \"volume\", \"vwap\"]]\n    df_eth = df_eth.add_suffix(\"_eth\")\n    df_eth[\"time\"] = df_eth[\"timestamp_eth\"].apply(lambda a : datetime.datetime.strptime(str(a).split(\"+\")[0], \"%Y-%m-%d %H:%M:%S\"))\n    return df_eth\n\ndef get_data():\n    data = get_btc_ohlcv()\n    dep = get_depth()\n    df = data.merge(dep)\n    eth = get_eth()\n    df = df.merge(eth, on = \"time\")\n    feats = [\"time\", \"close\", \"open\", \"high\", \"low\", \"volume\",\n            \"asks5\", \"bids5\", \"asks10\", \"bids10\", \"asks20\", \"bids20\", \"asks30\", \"bids30\", \"asks50\", \"bids50\", \"asks90\", \"bids90\"]\n    feats += [\"open_eth\", \"high_eth\", \"low_eth\", \"close_eth\", \"trades_eth\", \"volume_eth\", \"vwap_eth\"]\n    df = df[feats]\n    return df\n","repo_name":"katsu1110/bybit-bot","sub_path":"data_get_funcs.py","file_name":"data_get_funcs.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"23536512871","text":"import MESH as mm\nimport neighbours as nh\nfrom timeit import default_timer as timer\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nmesh = mm.Mesh('viv.msh')\n\nit = 100\n\ndef run(n):\n    t = np.zeros((n,3))\n    for i in range(n):\n        t1 = timer()\n        a1, b1 = nh.py_neighbours(mesh.num_nodes, mesh.ien)\n        t2 = timer()\n        a2, b2 = nh.py_neighbours2(mesh.num_nodes, mesh.ien)\n        t3 = timer()\n        a3, b3 = nh.py_neighbours3(mesh.num_nodes, mesh.ien)\n        t4 = timer()\n        t[i][0] = t2-t1\n        t[i][1] = t3-t2\n        t[i][2] = t4-t3\n    return t\n\nt = run(it)\nx = np.linspace(0, it+1, it)\nplt.figure()\nplt.plot(x, t[:,0], 'bo')\nplt.plot(x, t[:,1], 'r--')\nplt.plot(x, t[:,2], 'kx')\nplt.show()\n","repo_name":"luishcc/fempkg","sub_path":"tests/cython_neighbours/test-n.py","file_name":"test-n.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"24800568082","text":"from collections import deque\n\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\n\n\ndef bfs(s):\n    cnt = [[-1]*n for _ in range(n)]\n    q = deque()\n    for x, y in s:\n        cnt[x][y] = 0\n        q.append((x, y))\n    time = 0\n    while q:\n        x, y = q.popleft()\n        for i in range(4):\n            nx, ny = x+dx[i], y+dy[i]\n            if 0 <= nx < n and 0 <= ny < n:\n                if not a[nx][ny] == 1 and cnt[nx][ny] < 0:\n                    cnt[nx][ny] = cnt[x][y]+1\n                    q.append((nx, ny))\n                    time = cnt[nx][ny]\n    for i in range(n):\n        for j in range(n):\n            if cnt[i][j] < 0 and a[i][j] == 
0:\n                time = -1\n    return time\n\n\ndef go(x, m, s):\n    if m == 0:\n        time = bfs(s)\n        if time >= 0:\n            ans.append(time)\n        return\n    for i in range(x, len(b)):\n        go(i+1, m-1, s+[b[i]])\n\n\nn, m = map(int, input().split())\na = [list(map(int, input().split())) for _ in range(n)]\nb = list()\nfor i in range(n):\n    for j in range(n):\n        if a[i][j] == 2:\n            b.append((i, j))\nans = []\ngo(0, m, [])\nif ans:\n    print(min(ans))\nelse:\n    print(-1)\n","repo_name":"JUNGJUNSEO/baekjun","sub_path":"백준/17141_연구소 2_211202.py","file_name":"17141_연구소 2_211202.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9987642010","text":"#!/usr/bin/python\n\n# Pizza Till\n# Lewis Shaw\n\nimport os\nimport sys\nimport time\nimport re\n\nisProgramRunning = True\nwelcomeMessageDisplay = False\nlastShownMenu = 0\n\norder = { \"pizzas\": [] }\ncustomer = { \"customerName\": None, \"customerPhoneNumber\": None, \"customerAddress\": { \"postcode\": None, \"houseNumber\": None } }\n\nclass TooManyPizzasError(Exception):\n    pass\n\n\nclass OrderIsNotValidError(Exception):\n    pass\n\n\ndef is_order_valid() -> bool:\n    pizzas = order[\"pizzas\"]\n\n    if len(pizzas) > 0 and customer[\"customerName\"] != None:\n        return True\n\n    return False\n\n\ndef cancel_order():\n    global order\n    global customer\n\n    order = { \"pizzas\": [] }\n    customer = { \"customerName\": None, \"customerPhoneNumber\": None, \"customerAddress\": { \"postcode\": None, \"houseNumber\": None } }\n\n\ndef print_title(title: str):\n    new_line()\n    print(title)\n    print(\".\" * 32)\n\n\ndef new_line():\n    print()\n\n\ndef enter_customer_name():\n    try:\n        customerName = str(input(\"Enter customer's first name: \"))\n        customerName = customerName.strip(\" \")\n\n        if customerName.isalpha() == False:\n            raise ValueError\n\n        if len(customerName) == 0:\n            raise ValueError\n\n        customer[\"customerName\"] = customerName\n    except ValueError:\n        handle_error(\"Please enter a valid name.\")\n        enter_customer_name()\n\n\ndef enter_customer_phone_number():\n    try:\n        customerPhoneNumber = str(input(\"Enter customer's phone number: \"))\n\n        customerPhoneNumber = customerPhoneNumber.strip(\" \")\n\n        if len(customerPhoneNumber) != 11:\n            raise ValueError\n\n        customer[\"customerPhoneNumber\"] = customerPhoneNumber\n\n    except ValueError:\n        handle_error(\"Please enter a valid UK phone number.\")\n        enter_customer_phone_number()\n\n\ndef enter_customer_address():\n    try:\n        customerAddress = str(input(\"Enter customer's house number and postcode (separate with a comma): \"))\n\n        customerAddressDetails = customerAddress.split(\",\")\n        houseNumber = str(customerAddressDetails[0]).strip(\" \")\n        postcode = str(customerAddressDetails[1]).strip(\" \")\n\n        if houseNumber.isnumeric() == False:\n            raise ValueError\n\n        if re.search(\"^[a-zA-Z]{1,2}\\\d[a-zA-Z\\\d]?\\\s*\\\d[a-zA-Z]{2}$\", postcode) is None:\n            raise ValueError\n\n        customer[\"customerAddress\"] = { \"houseNumber\": houseNumber, \"postcode\": postcode }\n    except ValueError:\n        handle_error(\"Please enter a valid house number and UK postcode.\")\n        enter_customer_address()\n    except IndexError:\n        handle_error(\"Please separate the address details with a comma, like this: 32,PC11 4RT.\")\n        enter_customer_address()\n\n\ndef customer_details():\n    new_line()\n    print(\"Customer Name:\", customer[\"customerName\"])\n    print(\"Customer Phone Number:\", customer[\"customerPhoneNumber\"])\n    print(\"Customer Address:\", customer[\"customerAddress\"])\n\n\ndef 
customer_details_menu():\n    global customer\n\n    print_title(\"Enter Customer Details\")\n\n    new_line()\n\n    enter_customer_name()\n    enter_customer_phone_number()\n    enter_customer_address()\n\n    customer_details()\n    new_line()\n\n    answer = str(input(\"Is this data correct? \"))\n    answer = answer.lower()\n\n    if answer == \"yes\":\n        clear_screen()\n        showMenus(1)\n    else:\n        clear_screen()\n        showMenus(2)\n\n\ndef complete_order_menu():\n    print_title(\"Complete Order\")\n    new_line()\n    new_line()\n\n    smallPizza = 0\n    mediumPizza = 0\n    largePizza = 0\n    extraToppingsCharge = 0\n\n    for pizza in order[\"pizzas\"]:\n        if pizza[\"size\"] == \"small\":\n            smallPizza += 1\n        elif pizza[\"size\"] == \"medium\":\n            mediumPizza += 1\n        elif pizza[\"size\"] == \"large\":\n            largePizza += 1\n\n        if pizza[\"addedTopping\"] == 1:\n            extraToppingsCharge += 0.75\n        elif pizza[\"addedTopping\"] == 2:\n            extraToppingsCharge += 1.35\n        elif pizza[\"addedTopping\"] == 3:\n            extraToppingsCharge += 2.00\n        elif pizza[\"addedTopping\"] is not None:\n            extraToppingsCharge += 2.50\n\n    smallPizzaCost = round(smallPizza * 3.25, 2)\n    mediumPizzaCost = round(mediumPizza * 5.50, 2)\n    largePizzaCost = round(largePizza * 7.15, 2)\n    extraToppingsCharge = round(extraToppingsCharge, 2)\n    subtotal = round(smallPizzaCost + mediumPizzaCost + largePizzaCost + extraToppingsCharge, 2)\n\n    print(\"_\" * 38)\n    print(\"{:>15} {:>10} {:>10}\".format(\"Qty\", \"Pizza\", \"Amount\"))\n\n    if smallPizza != 0:\n        print(\"{:>15} {:>10} £{:>10}\".format(smallPizza, \"Small Pizza\", smallPizzaCost))\n    if mediumPizza != 0:\n        print(\"{:>15} {:>10} £{:>10}\".format(mediumPizza, \"Medium Pizza\", mediumPizzaCost))\n    if largePizza != 0:\n        print(\"{:>15} {:>10} £{:>10}\".format(largePizza, \"Large Pizza\", largePizzaCost))\n\n    print(\"_\" * 38)\n\n    if extraToppingsCharge != 0:\n        print(\"{:>15} £{:>20}\".format(\"Extra Toppings Charge\", extraToppingsCharge))\n\n    if subtotal >= round(20.0, 2):\n        subtotal = round(subtotal * 0.9, 2)  # apply the advertised 10% discount on orders of £20 or more\n        print(\"{:>15} £{:>20}\".format(\"Discount\", \"10%\"))\n\n    print(\"{:>15} £{:>20}\".format(\"Delivery Charge\", \"2.50\"))\n\n    print(\"{:>15} £{:>20}\".format(\"Subtotal\", round(subtotal, 2)))\n\n    print(\"_\" * 38)\n\n    total = round(subtotal + 2.5, 2)\n\n    print(\"{:>15} £{:>20}\".format(\"Total\", total))\n\n    print(\"_\" * 38)\n\n    new_line()\n    new_line()\n\n    correct = input(\"Is this correct? \").lower()\n\n    if correct == \"yes\":\n        cancel_order()\n        clear_screen()\n        showMenus(0)\n    else:\n        clear_screen()\n        showMenus(1)\n\n\ndef add_pizza_menu():\n    print_title(\"Select a Pizza Size\")\n    print(\"1. Small - £3.25\")\n    print(\"2. Medium - £5.50\")\n    print(\"3. Large - £7.15\")\n    print(\"4. 
Done?\")\n\n    new_line()\n\n    try:\n        option = int(input(\"Please select an option: \"))\n\n        if len(order[\"pizzas\"]) > 5 and option != 4:\n            raise TooManyPizzasError\n        elif option == 1:\n            order[\"pizzas\"].append({ \"size\": \"small\", \"addedTopping\": None })\n        elif option == 2:\n            order[\"pizzas\"].append({ \"size\": \"medium\", \"addedTopping\": None })\n        elif option == 3:\n            order[\"pizzas\"].append({ \"size\": \"large\", \"addedTopping\": None })\n        elif option == 4:\n            clear_screen()\n            showMenus(1)\n        else:\n            handle_error(str(option) + \" is not a valid option.\")\n    except ValueError:\n        handle_error(\"Please enter a valid option.\")\n    except TooManyPizzasError:\n        handle_error(\"You can only order a max of 6 pizzas at one time.\")\n\n\ndef pizza_toppings_menu():\n    print_title(\"Add Toppings to Order\")\n    new_line()\n\n    pizzas = order[\"pizzas\"]\n    pizzaIndex = 0\n\n    for pizza in pizzas:\n        print(pizzaIndex, pizza)\n        pizzaIndex += 1\n\n    print(\"7: Done?\")\n\n    try:\n        new_line()\n        pizza = int(input(\"Select a pizza to add toppings to: \"))\n\n        if pizza == 7:\n            clear_screen()\n            showMenus(1)\n            return\n\n        if pizza >= len(pizzas):\n            raise ValueError\n\n        new_line()\n        toppings = int(input(\"How many toppings would you like: \"))\n\n        pizzas[pizza][\"addedTopping\"] = toppings\n        clear_screen()\n    except ValueError:\n        handle_error(\"Please enter a valid pizza.\")\n\n\ndef order_pizza_menu():\n    print_title(\"Order Pizza\")\n    print(\"1. Customer details\")\n    print(\"2. Add pizza to order\")\n    print(\"3. Add extra toppings to order\")\n    print(\"4. Complete order\")\n    print(\"5. Cancel\")\n\n    new_line()\n\n    try:\n        option = int(input(\"Select an option > \"))\n\n        if option == 1:\n            if customer[\"customerName\"] == None:\n                clear_screen()\n                showMenus(2)\n            else:\n                customer_details()\n        elif option == 2:\n            clear_screen()\n            showMenus(3)\n        elif option == 3:\n            clear_screen()\n            showMenus(4)\n        elif option == 4:\n            if is_order_valid() == False:\n                raise OrderIsNotValidError\n\n            clear_screen()\n            showMenus(5)\n        elif option == 5:\n            cancel_order()\n\n            clear_screen()\n            showMenus(0)\n        else:\n            handle_error(str(option) + \" is not a valid option.\")\n    except ValueError:\n        handle_error(\"Please enter a valid option.\")\n    except OrderIsNotValidError:\n        handle_error(\"The order is not valid, please check the order.\")\n\n\ndef welcome_message(username: str):\n    print(\" Pizza Till\")\n    print(\" Welcome,\", username)\n    print(\".\" * 32)\n    new_line()\n\n\ndef main_menu():\n    print_title(\"Main Menu\")\n    print(\"1. Create an order\")\n    print(\"2. 
Exit\")\n\n    new_line()\n\n    try:\n        option = int(input(\"Select an option > \"))\n\n        if option == 1:\n            clear_screen()\n            showMenus(1)\n        elif option == 2:\n            exit(0)\n        else:\n            handle_error(str(option) + \" is not a valid option.\")\n    except ValueError:\n        handle_error(\"Please enter a valid option.\")\n\n\ndef handle_error(error):\n    clear_screen()\n\n    if error == None or error == \"\":\n        print(\"There was an unknown error.\")\n    else:\n        print(error)\n\n    new_line()\n\n\ndef program():\n    global welcomeMessageDisplay\n\n    if welcomeMessageDisplay == False:\n        welcome_message(\"Lewis\")\n        welcomeMessageDisplay = True\n\n    showMenus(lastShownMenu)\n\n\ndef showMenus(index: int):\n    global lastShownMenu\n\n    if index == 0:\n        lastShownMenu = 0\n        main_menu()\n    elif index == 1:\n        lastShownMenu = 1\n        order_pizza_menu()\n    elif index == 2:\n        lastShownMenu = 2\n        customer_details_menu()\n    elif index == 3:\n        lastShownMenu = 3\n        add_pizza_menu()\n    elif index == 4:\n        lastShownMenu = 4\n        pizza_toppings_menu()\n    elif index == 5:\n        lastShownMenu = 5\n        complete_order_menu()\n\n\ndef exit(code: int):\n    global isProgramRunning\n    isProgramRunning = False\n\n    print(\"Exiting...\")\n    time.sleep(1)\n    sys.exit(code)\n\n\ndef clear_screen():\n    if os.name == \"nt\":\n        _ = os.system(\"cls\")\n    else:\n        _ = os.system(\"clear\")\n\n\nwhile isProgramRunning:\n    program()","repo_name":"purplefrizzel/PizzaTill","sub_path":"pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":10348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28815186038","text":"import pandas as pd\nimport dash\nfrom dash import html\nfrom dash import dcc\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport numpy as np\nfrom dash import no_update\n\napp = dash.Dash(__name__)\napp.config.suppress_callback_exceptions = True\n\nnetflix_data = pd.read_csv('NetflixCleanData.csv')\n\napp.layout = html.Div(children=[\n    html.H1('Netflix Statistics by Year Range',\n            style={'textAlign': 'center','color': '#503D36','font-size':45}),\n    html.Div([\n        html.Div(\n            html.H2('Year Range',style={'margin-right':'2em'})\n        ),\n        dcc.Dropdown(id='yearrange-dropdown',\n            options=[\n                {'label':'1936 - 1945','value':'cat1'},\n                {'label':'1946 - 1955','value':'cat2'},\n                {'label':'1956 - 1965','value':'cat3'},\n                {'label':'1966 - 1975','value':'cat4'},\n                {'label':'1976 - 1985','value':'cat5'},\n                {'label':'1986 - 1995','value':'cat6'},\n                {'label':'1996 - 2005','value':'cat7'},\n                {'label':'2006 - 2015','value':'cat8'},\n                {'label':'2016 - 2021','value':'cat9'}\n            ],\n            value='cat1'\n        ),\n        html.Div([\n            html.Div([],id='plot1'),\n            html.Div([],id='plot2') \n        ],style={'display':'flex'}\n        ),\n        html.Div([\n            html.Div([],id='plot3'),\n            html.Div([],id='plot4')\n        ],style={'display':'flex'})\n    ])\n])\n\n@app.callback([\n    Output(component_id='plot1',component_property='children'),\n    Output(component_id='plot2',component_property='children'),\n    Output(component_id='plot3',component_property='children'),\n    Output(component_id='plot4',component_property='children')],\n    Input(component_id='yearrange-dropdown',component_property='value')\n)\ndef display_netflix_yearly_charts(value):\n    # main df\n    if value == 'cat1': years = [1936,1945]\n    elif value == 'cat2': years = [1946,1955]\n    elif value == 'cat3': years = [1956,1965]\n    elif value == 'cat4': years = [1966,1975]\n    elif value == 'cat5': years = [1976,1985]\n    elif value 
== 'cat6': years = [1986,1995]\n    elif value == 'cat7': years = [1996,2005]\n    elif value == 'cat8': years = [2006,2015]\n    else: years = [2016,2021]\n    df = netflix_data[(netflix_data['release_year'] >= years[0]) & (netflix_data['release_year'] <= years[1])]\n    # Movies vs TV Shows\n    netflix_types = df['type'].value_counts()\n    typesfig = go.Figure(data=[go.Pie(labels=netflix_types.index,values=netflix_types,hole=0.3,pull=[0.0,0.2])])\n    typesfig.update_layout(\n        title={'text':'Movies vs TV Shows Proportion','x':0.5,'xanchor':'center'}\n    )\n    typesfig.update_traces(\n        marker=dict(colors=['#316395','#B82E2E'])\n    )\n    # Top 10 countries of origin\n    df_top_countries = df['country'].value_counts(ascending=False).head(10)\n    countriesfig = go.Figure(data=[go.Bar(x=df_top_countries.index,y=df_top_countries,marker_color='#FFA15A')])\n    countriesfig.update_layout(\n        title={'text':'Top countries of origin','x':0.5,'xanchor':'center'}\n    )\n    # Releases per year\n    df_years_releases = df['release_year'].value_counts()\n    yearsfig = go.Figure(data=[go.Bar(x=list(map(str,df_years_releases.index)),y=df_years_releases,marker_color=\"#66AA00\")])\n    yearsfig.update_layout(\n        title={'text':'Top years by number of releases','x':0.5,'xanchor':'center'}\n    )\n    # Top categories\n    df_categories = np.asarray(df['categories'])\n    categories_dict = {}\n    for category in df_categories:\n        categories = category.split(\", \")\n        for cat in categories:\n            if cat in categories_dict.keys(): categories_dict[cat] = categories_dict[cat] + 1\n            else: categories_dict[cat] = 1\n    # sort most-frequent first so the head of the series really is the top 10\n    categories_dict = sorted(categories_dict.items(),key=lambda x:x[1],reverse=True)\n    sorted_categories_dict = dict(categories_dict)\n    categories_series = pd.Series([x for x in sorted_categories_dict.values()],\n                                  index=[x for x in sorted_categories_dict.keys()])\n    categories_series = categories_series[:10]\n    categoriesfig = go.Figure(data=[go.Bar(x=categories_series.index,y=categories_series,marker_color='#7F7F7F')])\n    categoriesfig.update_layout(\n        title={'text':'Top 10 categories','x':0.5,'xanchor':'center'}\n    )\n\n    return [dcc.Graph(figure=typesfig),\n            dcc.Graph(figure=countriesfig),\n            dcc.Graph(figure=yearsfig),\n            dcc.Graph(figure=categoriesfig)]\n\nif __name__ == '__main__':\n    app.run_server()\n","repo_name":"JavRamCos/DataScienceProjects","sub_path":"NetflixDatabaseVisualization/NetflixDashboard.py","file_name":"NetflixDashboard.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"559810529","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport requests\nimport time\nimport shutil\nfrom resources.log import getLogger\nfrom resources.readsettings import ReadSettings\nfrom resources.metadata import MediaType\nfrom resources.mediaprocessor import MediaProcessor\n\n\ndef rescanAndWait(host, port, webroot, apikey, protocol, movieid, log, retries=6, delay=10):\n    headers = {'X-Api-Key': apikey}\n    # First trigger rescan\n    payload = {'name': 'RescanMovie', 'movieId': movieid}\n    url = protocol + host + \":\" + str(port) + webroot + \"/api/command\"\n    r = requests.post(url, json=payload, headers=headers)\n    rstate = r.json()\n    try:\n        rstate = rstate[0]\n    except:\n        pass\n    log.info(\"Radarr response Rescan command: ID %d %s.\" % (rstate['id'], rstate['state']))\n    log.debug(str(rstate))\n\n    # Then wait for it to finish\n    url = protocol + host + \":\" + str(port) + webroot + \"/api/command/\" + str(rstate['id'])\n    log.info(\"Waiting for rescan to complete\")\n    r = requests.get(url, 
headers=headers)\n command = r.json()\n attempts = 0\n while command['state'].lower() not in ['complete', 'completed'] and attempts < retries:\n log.debug(\"State: %s.\" % (command['state']))\n time.sleep(delay)\n r = requests.get(url, headers=headers)\n command = r.json()\n attempts += 1\n log.info(\"Final state: %s.\" % (command['state']))\n log.debug(str(command))\n return command['state'].lower() in ['complete', 'completed']\n\n\ndef getMovieInformation(host, port, webroot, apikey, protocol, movieid, log):\n headers = {'X-Api-Key': apikey}\n url = protocol + host + \":\" + str(port) + webroot + \"/api/movie/\" + movieid\n log.info(\"Requesting updated information from Radarr for movie ID %s.\" % movieid)\n r = requests.get(url, headers=headers)\n payload = r.json()\n return payload\n\n\ndef renameFile(inputfile, log):\n filename, fileext = os.path.splitext(inputfile)\n outputfile = \"%s.rnm%s\" % (filename, fileext)\n i = 2\n while os.path.isfile(outputfile):\n outputfile = \"%s.rnm%d%s\" % (filename, i, fileext)\n i += 1\n os.rename(inputfile, outputfile)\n log.debug(\"Renaming file %s to %s.\" % (inputfile, outputfile))\n return outputfile\n\n\ndef renameMovie(host, port, webroot, apikey, protocol, movieid, log):\n headers = {'X-Api-Key': apikey}\n # First trigger rescan\n payload = {'name': 'RenameMovie', 'movieIds': [movieid]}\n url = protocol + host + \":\" + str(port) + webroot + \"/api/command\"\n r = requests.post(url, json=payload, headers=headers)\n rstate = r.json()\n try:\n rstate = rstate[0]\n except:\n pass\n log.info(\"Radarr response Rename command: ID %d %s.\" % (rstate['id'], rstate['state']))\n log.debug(str(rstate))\n\n\ndef backupSubs(dir, mp, log, extension=\".backup\"):\n files = []\n output = {}\n for r, _, f in os.walk(dir):\n for file in f:\n files.append(os.path.join(r, file))\n for filepath in files:\n info = mp.isValidSubtitleSource(filepath)\n if info:\n newpath = filepath + extension\n shutil.copy2(filepath, newpath)\n output[newpath] = filepath\n log.info(\"Copying %s to %s.\" % (filepath, newpath))\n return output\n\n\ndef restoreSubs(subs, log):\n for k in subs:\n try:\n os.rename(k, subs[k])\n log.info(\"Restoring %s to %s.\" % (k, subs[k]))\n except:\n os.remove(k)\n log.exception(\"Unable to restore %s, deleting.\" % (k))\n\n\nlog = getLogger(\"RadarrPostProcess\")\n\nlog.info(\"Radarr extra script post processing started.\")\n\nif os.environ.get('radarr_eventtype') == \"Test\":\n sys.exit(0)\n\nsettings = ReadSettings()\n\nlog.debug(os.environ)\n\ninputfile = os.environ.get('radarr_moviefile_path')\noriginal = os.environ.get('radarr_moviefile_scenename')\nimdbid = os.environ.get('radarr_movie_imdbid')\ntmdbid = os.environ.get('radarr_movie_tmdbid')\nmovieid = os.environ.get('radarr_movie_id')\n\nmp = MediaProcessor(settings)\n\nlog.debug(\"Input file: %s.\" % inputfile)\nlog.debug(\"Original name: %s.\" % original)\nlog.debug(\"IMDB ID: %s.\" % imdbid)\nlog.debug(\"TMDB ID: %s.\" % tmdbid)\nlog.debug(\"Radarr Movie ID: %s.\" % movieid)\n\ntry:\n if settings.Radarr.get('rename'):\n # Prevent asynchronous errors from file name changing\n mp.settings.waitpostprocess = True\n try:\n inputfile = renameFile(inputfile, log)\n except:\n log.exception(\"Error renaming inputfile\")\n\n success = mp.fullprocess(inputfile, MediaType.Movie, original=original, tmdbid=tmdbid, imdbid=imdbid)\n\n if success:\n # Update Radarr to continue monitored status\n try:\n host = settings.Radarr['host']\n port = settings.Radarr['port']\n webroot = 
settings.Radarr['webroot']\n apikey = settings.Radarr['apikey']\n ssl = settings.Radarr['ssl']\n protocol = \"https://\" if ssl else \"http://\"\n\n log.debug(\"Radarr host: %s.\" % host)\n log.debug(\"Radarr port: %s.\" % port)\n log.debug(\"Radarr webroot: %s.\" % webroot)\n log.debug(\"Radarr apikey: %s.\" % apikey)\n log.debug(\"Radarr protocol: %s.\" % protocol)\n\n if apikey != '':\n headers = {'X-Api-Key': apikey}\n\n subs = backupSubs(os.path.split(success[0])[0], mp, log)\n\n if rescanAndWait(host, port, webroot, apikey, protocol, movieid, log):\n log.info(\"Rescan command completed\")\n\n movieinfo = getMovieInformation(host, port, webroot, apikey, protocol, movieid, log)\n if not movieinfo.get('hasFile'):\n log.warning(\"Rescanned movie does not have a file, attempting second rescan.\")\n if rescanAndWait(host, port, webroot, apikey, protocol, movieid, log):\n movieinfo = getMovieInformation(host, port, webroot, apikey, protocol, movieid, log)\n if not movieinfo.get('hasFile'):\n log.warning(\"Rescanned movie still does not have a file, will not set to monitored to prevent endless loop.\")\n sys.exit(1)\n else:\n log.info(\"File found after second rescan.\")\n else:\n log.error(\"Rescan command timed out\")\n restoreSubs(subs, log)\n sys.exit(1)\n\n if len(subs) > 0:\n log.debug(\"Restoring %d subs and triggering a final rescan.\" % (len(subs)))\n restoreSubs(subs, log)\n rescanAndWait(host, port, webroot, apikey, protocol, movieid, log)\n\n movieinfo['monitored'] = True\n\n # Then set that movie to monitored\n log.debug(\"Sending PUT request with following payload:\")\n log.debug(str(movieinfo)) # debug\n\n url = protocol + host + \":\" + str(port) + webroot + \"/api/movie/\" + str(movieid)\n r = requests.put(url, json=movieinfo, headers=headers)\n success = r.json()\n\n log.debug(\"PUT request returned:\")\n log.debug(str(success))\n log.info(\"Radarr monitoring information updated for movie %s.\" % success['title'])\n\n renameMovie(host, port, webroot, apikey, protocol, movieid, log)\n else:\n log.error(\"Rescan command timed out\")\n sys.exit(1)\n else:\n log.error(\"Your Radarr API Key is blank. 
Update autoProcess.ini to enable status updates.\")\n except:\n log.exception(\"Radarr monitor status update failed.\")\n else:\n log.info(\"Processing returned False.\")\n sys.exit(1)\nexcept:\n log.exception(\"Error processing file\")\n sys.exit(1)\n","repo_name":"oregonpillow/sickbeard_mp4_automator","sub_path":"postRadarr.py","file_name":"postRadarr.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"36872744697","text":"from pathlib import Path\n\nimport pytest\nimport yaml\n\nfrom marketdata import datasource as ds\nfrom marketdata import util\nfrom marketdata.exceptions import DataSourceException\n\nDC = util.DataSourceConstants\n\n\n@pytest.fixture(autouse=True)\ndef mock_app_config(mocker):\n \"\"\"\n Mocks the app configuration\n \"\"\"\n conf_file_path = Path(__file__).resolve().parent.parent.joinpath(\"resources\", \"test_conf.yaml\")\n if Path(conf_file_path).exists():\n with open(conf_file_path) as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n yield mocker.patch(\"marketdata.datasource.util.read_app_config\", return_value=config, autospec=True)\n\n\ndef test_init_generic_ds():\n \"\"\"\n Test creating generic data source objects\n \"\"\"\n test_config = util.read_app_config()\n data_source_list = test_config[DC.DATA_SOURCES_PARENT]\n datasource = ds.DataSource(data_source_list[0])\n assert datasource.name == data_source_list[0][DC.NAME]\n datasource = ds.DataSource(data_source_list[1])\n assert datasource.name == data_source_list[1][DC.NAME]\n\n datasource = ds.DataSource(data_source_list[2])\n assert datasource.name == data_source_list[2][DC.NAME]\n\n\ndef test_init_invalid_ds():\n \"\"\"\n Tests creating data sources with invalid configurations\n \"\"\"\n test_config = util.read_app_config()\n data_source_list = test_config[DC.DATA_SOURCES_PARENT]\n with pytest.raises(DataSourceException, match=r\".*Base Url is required when data source is not a \"\n r\"library*.\"):\n ds.DataSource(data_source_list[3])\n\n with pytest.raises(DataSourceException, match=r\".*API Authentication token is a required field and is \"\n r\"missing in configuration*.\"):\n ds.DataSource(data_source_list[4])\n\n\ndef test_create_generic_ds():\n \"\"\"\n Tests creating a generic data source using create data source function\n \"\"\"\n test_config = util.read_app_config()\n data_source_list = test_config[DC.DATA_SOURCES_PARENT]\n\n datasource = ds.create_datasource(data_source_list[0][DC.NAME])\n assert isinstance(datasource, ds.DataSource)\n assert datasource.name == data_source_list[0][DC.NAME]\n\n datasource = ds.create_datasource(data_source_list[1][DC.NAME])\n assert isinstance(datasource, ds.DataSource)\n assert datasource.name == data_source_list[1][DC.NAME]\n\n datasource = ds.create_datasource(data_source_list[2][DC.NAME])\n assert isinstance(datasource, ds.DataSource)\n assert datasource.name == data_source_list[2][DC.NAME]\n\n\ndef test_create_generic_ds_with_invalid_config():\n \"\"\"\n Tests creating a generic data source with invalid config using create data source function\n \"\"\"\n test_config = util.read_app_config()\n data_source_list = test_config[DC.DATA_SOURCES_PARENT]\n\n with pytest.raises(DataSourceException):\n ds.create_datasource(data_source_list[3][DC.NAME])\n\n with pytest.raises(DataSourceException):\n ds.create_datasource(data_source_list[4][DC.NAME])\n\n\ndef test_create_ds_without_config():\n \"\"\"\n Test creating a datasource where the data source config does not 
exist in the app config\n    \"\"\"\n    with pytest.raises(ValueError):\n        ds.create_datasource(\"XXX\")\n\n\ndef test_init_ds_with_empty_config():\n    \"\"\"\n    Tests the behaviour when a data source is created without a config\n    \"\"\"\n    with pytest.raises(DataSourceException, match=r\".*Configuration object is empty or not a required type*.\"):\n        ds.DataSource({})\n\n\n# noinspection PyTypeChecker\ndef test_init_ds_with_invalid_conf_type():\n    \"\"\"\n    Tests creating data source instances with invalid config type\n    \"\"\"\n    with pytest.raises(DataSourceException, match=\".*Configuration object is empty or not a required type*.\"):\n        ds.DataSource(None)\n\n    with pytest.raises(DataSourceException, match=\".*Configuration object is empty or not a required type*.\"):\n        ds.DataSource(\"config\")\n","repo_name":"madawas/market-analysis","sub_path":"tests/unit/test_datasource.py","file_name":"test_datasource.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2301695909","text":"#!/usr/bin/env python3\n\nimport pyautogui as p\nfrom itertools import repeat\nimport random\n\np.FAILSAFE = True # With the Failsafe set to 'True' the upper left corner will stop running the code. Set it to False if you don't want to have it enabled.\n\n\nnumber = random.randrange(1000, 10000, 1)\t\t\t# Pick a random number of moves between 1000 and 10000\n\nfor runs in repeat(None, number):\t\t\t\t\t# Loop that many times. Using the repeat function to specify how many times.\n\tcount1 = random.randrange(1, 1440, 1)\t\t\t# Creating random X to move on display\n\tcount2 = random.randrange(335, 1335, 1)\t\t\t# Creating random Y to move on display\n\tp.moveTo(count1, count2)\t\t\t\t\t\t# Moves to the coordinates\n\np.alert(\"You have been trolled \" + str(number) + \" times! Thank you for your patience.\")\n","repo_name":"eLVee1991/Trolls","sub_path":"evilmouse.py","file_name":"evilmouse.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42386991935","text":"import random\nimport string\nfrom datetime import date\n\n\ndef print_day_of_week():\n    \"\"\" Use if elif else construction to print the verbose version of weekday. 
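Note that date.weekday() returns 0 for Monday through 6 for Sunday. 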
\"\"\"\n    today = date.today()\n    weekday = today.weekday()  # Values from 0 - 6\n\n    # Finish the function here\n\n    if weekday == 0:\n        day = \"Monday\"\n    elif weekday == 1:\n        day = \"Tuesday\"\n    elif weekday == 2:\n        day = \"Wednesday\"\n    elif weekday == 3:\n        day = \"Thursday\"\n    elif weekday == 4:\n        day = \"Friday\"\n    elif weekday == 5:\n        day = \"Saturday\"\n    else:\n        day = \"Sunday\"\n\n    print(day)\n\n\ndef get_products():\n    \"\"\" Should fill and return the products list \"\"\"\n    products = []\n\n    for number in range(1, random.randint(100, 1000)):\n        # add (number ** 2 - 1) to products if number > 5 and is odd\n        if number > 5 and number % 2 != 0:\n            products.append(number ** 2 - 1)\n\n    return products\n\n\ndef get_random_string():\n    string_els = []\n\n    # Using while loop, fill the string_els with 100 random\n    # selected letters from the English alphabet\n    # Return the string_els as string.\n\n    index = 0\n    while index < 100:\n        string_els.append(random.choice(string.ascii_letters))\n        index += 1\n\n    return \"\".join(string_els)\n\n\ndef print_even_members():\n    range_from = random.randint(1, random.choice([100, 200, 300, 400]))\n    range_to = range_from + 100\n    range_obj = range(range_from, range_to)\n\n    # Use a for loop to iterate over the range_obj\n    # Use if statement to print out only even members\n    # Stop printing out if you have printed 50 members\n\n    counter = 0\n    for num in range_obj:\n        if counter >= 50:  # range_obj always spans 100 numbers, so exactly 50 are even; the cap is kept as a safeguard\n            break\n        if num % 2 == 0:\n            print(num)\n            counter += 1\n\n\nprint(\"------------------1.print_day_of_week--------------------------\")\n\nprint_day_of_week()\n\nprint(\"------------------2.get_products--------------------------\")\n\nprint(get_products())\n\nprint(\"------------------3.get_random_string--------------------------\")\n\nprint(get_random_string())\n\nprint(\"------------------4.print_even_members--------------------------\")\n\nprint_even_members()\n","repo_name":"VenziVi/CA_Course","sub_path":"Python-Course/2.Control_flow/Homework/1.Homework.py","file_name":"1.Homework.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"29906548540","text":"import dataclasses\nimport functools\nfrom typing import Any, Sequence, Union, Optional, Tuple\n\nimport pipeline_dp\n\nfrom utility_analysis import non_private_combiners\n\nDataType = Union[Sequence[Any]]\ntry:\n    from pyspark import RDD\n    DataType = Union[DataType, RDD]\nexcept ImportError:\n    pass\n\ntry:\n    from apache_beam import pvalue\n    DataType = Union[DataType, pvalue.PCollection]\nexcept ImportError:\n    pass\n\n@dataclasses.dataclass(frozen=True)\nclass SampleParams:\n    number_of_sampled_partitions: int\n    metrics: Optional[Sequence[pipeline_dp.Metrics]] = None\n\n\ndef _extract_fn(data_extractors: pipeline_dp.DataExtractors,\n                row: DataType) -> DataType:\n    \"\"\"Extracts the columns to (pid, pkey, pvalue).\n\n    Args:\n      data_extractors: A function to extract privacy_id, partition_key, value of\n        the input data.\n      row: The data to extract, should usually be raw input of the pipeline.\n\n    Returns:\n      Data in format of (pid, pkey, pvalue) defined by the extractors.\n    \"\"\"\n    return data_extractors.privacy_id_extractor(\n        row), data_extractors.partition_extractor(\n            row), data_extractors.value_extractor(row)\n\n\nclass DataPeeker:\n    \"\"\"A helper class that contains methods for privacy utility analysis.\"\"\"\n\n    def __init__(self, ops: 
pipeline_dp.pipeline_backend.PipelineBackend):\n self._be = ops\n\n def sketch(self, input_data: DataType, params: SampleParams,\n data_extractors: pipeline_dp.DataExtractors) -> DataType:\n \"\"\"Generates sketches in the format of (partition_key, value, partition_count).\n\n The sketches has one entry for each unique (partition_key, privacy_id).\n Parameter tuning on outputs of sketch ignores `min_value` and `max_value` of\n AggregateParams\n\n partition_key: the hashed version of the current partition key\n partition_value: the per privacy id per partition_key aggregated value\n partition_count: the number of partitions this privacy id contributes to\n\n Args:\n input_data: The data to sample. It can be local data, beam PCollection or\n Spark RDD depending on the engine used.\n params: The parameters defining sampling properties.\n data_extractors: A function to extract privacy_id, partition_key, value of\n the input data.\n\n Returns:\n Sketches in the format of (partition_key, value, partition_count).\n \"\"\"\n if params.metrics is None:\n raise ValueError(\"Must provide aggregation metrics for sketch.\")\n if len(params.metrics) != 1 or params.metrics[0] not in [\n pipeline_dp.aggregate_params.Metrics.SUM,\n pipeline_dp.aggregate_params.Metrics.COUNT\n ]:\n raise ValueError(\n \"Sketch only supports a single aggregation and it must be COUNT or SUM.\"\n )\n combiner = non_private_combiners.create_compound_combiner(\n metrics=params.metrics)\n\n # Extract the columns.\n col = self._be.map(input_data,\n functools.partial(_extract_fn, data_extractors),\n \"Extract (privacy_id, partition_key, value))\")\n # col : (privacy_id, partition_key, value)\n col = self._be.map_tuple(\n col, lambda pid, pk, v: (pk, (pid, v)),\n \"Rekey to (partition_key, (privacy_id, value))\")\n # col : (partition_key, (privacy_id, value))\n # sample\n # group by key, filter keys by sampling, expand the values by flat map\n col = self._be.group_by_key(col, \"Group by pk\")\n col = self._be.map_tuple(col, lambda pk, pid_v_seq: (1,\n (pk, pid_v_seq)),\n \"Rekey to (1, (pk, pid_v_seq))\")\n col = self._be.sample_fixed_per_key(col,\n params.number_of_sampled_partitions,\n \"Sample partitions\")\n col = self._be.flat_map(col, lambda plst: plst[1], \"Extract values\")\n\n def flatten_sampled_results(\n pk_pid_pval_list: Tuple[Any, Sequence[Tuple[Any, Any]]]\n ) -> Sequence[Tuple[Any, Tuple[Any, Any]]]:\n pk, pid_pval_list = pk_pid_pval_list\n return [(pk, pid_pval) for pid_pval in pid_pval_list]\n\n col = self._be.flat_map(col, flatten_sampled_results,\n \"Flatten to (pk, (pid, value))\")\n\n # col : (partition_key, (privacy_id, value))\n # calculates partition_count after sampling and per\n # (partition_key, privacy_id) pair aggregated value\n col = self._be.map_tuple(col, lambda pk, pid_v: (\n (pk, pid_v[0]), pid_v[1]), \"Transform to (pk, pid), value))\")\n # col : ((partition_key, privacy_id), value)\n\n col = self._be.group_by_key(col, \"Group by (pk, pid)\")\n # col : ((partition_key, privacy_id), [value])\n col = self._be.map_values(col, combiner.create_accumulator,\n \"Aggregate by (pk, pid)\")\n # col : ((partition_key, privacy_id), accumulator)\n col = self._be.map_tuple(\n col, lambda pk_pid, p_value: (pk_pid[1], (pk_pid[0], p_value)),\n \"Transform to (pid, (pk, accumulator))\")\n # col : (privacy_id, (partition_key, accumulator))\n col = self._be.group_by_key(col, \"Group by privacy_id\")\n\n key_accumulator_sequence_type = Sequence[Tuple[\n Any, pipeline_dp.accumulator.Accumulator]]\n\n def 
calculate_partition_count(\n key_accumulator_list: key_accumulator_sequence_type\n ) -> Tuple[int, key_accumulator_sequence_type]:\n partition_count = len(set(pk for pk, _ in key_accumulator_list))\n return (partition_count, key_accumulator_list)\n\n col = self._be.map_values(col, calculate_partition_count,\n \"Calculates partition_count\")\n\n # col : (privacy_id, (partition_count, [(partition_key, accumulator)]))\n\n def flatten_results(\n input_col: Tuple[Any, Tuple[int, key_accumulator_sequence_type]]\n ) -> Sequence[Tuple[Any, Any, int]]:\n _, pcount_pk_acc_list = input_col\n pcount, pk_acc_list = pcount_pk_acc_list\n return [(pk, acc[0], pcount) for pk, acc in pk_acc_list]\n\n return self._be.flat_map(\n col, flatten_results,\n \"Flatten to (pk, aggregated_value, partition_count)\")\n # (partition_key, aggregated_value, partition_count)\n\n def sample(self, input_data: DataType, params: SampleParams,\n data_extractors: pipeline_dp.DataExtractors) -> DataType:\n \"\"\"Generates sampled outputs of the input data according to sample parameters.\n\n The sampling is by partitions. e.g. a certain amount of partitions_keys are\n selected and the output contains all records with these partition_keys.\n\n Args:\n input_data: The data to sample. It can be local data, beam PCollection or\n Spark RDD depending on the engine used.\n params: The parameters defining sampling properties.\n data_extractors: A function to extract privacy_id, partition_key, value of\n the input data.\n\n Returns:\n Sampled output containing tuple of (privacy_id, partition_key, value).\n \"\"\"\n\n col = self._be.map(input_data,\n functools.partial(_extract_fn, data_extractors),\n \"Extract (privacy_id, partition_key, value))\")\n # col : (privacy_id, partition_key, value)\n col = self._be.map_tuple(\n col, lambda pid, pk, v: (pk, (pid, v)),\n \"Rekey to (partition_key, (privacy_id, value))\")\n # col : (partition_key, (privacy_id, value))\n # Sample the data.\n # group by key, filter keys by sampling, expand the values by flat map\n col = self._be.group_by_key(col, \"Group by pk\")\n col = self._be.map_tuple(col, lambda pk, pid_v_seq: (1,\n (pk, pid_v_seq)),\n \"Rekey to (1, (pk, pid_v_seq))\")\n col = self._be.sample_fixed_per_key(col,\n params.number_of_sampled_partitions,\n \"Sample partitions\")\n col = self._be.flat_map(col, lambda plst: plst[1], \"\")\n\n def expand_fn(pk_pidandvseq: DataType):\n pk, pid_pv_seq = pk_pidandvseq\n return [(pid, pk, v) for pid, v in pid_pv_seq]\n\n col = self._be.flat_map(col, expand_fn, \"Transform to (pid, pk, value)\")\n return col\n\n def aggregate_true(self, col, params: SampleParams,\n data_extractors: pipeline_dp.DataExtractors) -> DataType:\n \"\"\"Computes raw aggregation results of the input data without adding noises.\n\n Aggregation means aggregate values group by partition_key. Both values and\n partition_key are extracted by data extractors.\n\n Args:\n input_data: The data to sample. 
It can be local data, beam PCollection or\n Spark RDD depending on the engine used.\n data_extractors: A function to extract privacy_id, partition_key, value of\n the input data.\n\n Returns:\n True aggregation results.\n \"\"\"\n combiner = non_private_combiners.create_compound_combiner(\n metrics=params.metrics)\n\n col = self._be.map(\n col, lambda row: (data_extractors.privacy_id_extractor(row),\n data_extractors.partition_extractor(row),\n data_extractors.value_extractor(row)),\n \"Extract (privacy_id, partition_key, value))\")\n # col : (privacy_id, partition_key, value)\n col = self._be.map_tuple(\n col, lambda pid, pk, v: ((pid, pk), v),\n \"Rekey to ( (privacy_id, partition_key), value))\")\n col = self._be.group_by_key(col, \"Group by pk\")\n col = self._be.map_values(col, combiner.create_accumulator,\n \"Aggregate by (pk, pid)\")\n # ((privacy_id, partition_key), aggregator)\n col = self._be.map_tuple(col, lambda pid_pk, v: (pid_pk[1], v),\n \"Drop privacy id\")\n # col : (partition_key, accumulator)\n col = self._be.combine_accumulators_per_key(\n col, combiner, \"Reduce accumulators per partition key\")\n # col : (partition_key, accumulator)\n # Compute metrics.\n col = self._be.map_values(col, combiner.compute_metrics,\n \"Compute DP metrics\")\n # col : (partition_key, aggregated_value)\n return col\n","repo_name":"OpenMined/PipelineDP","sub_path":"utility_analysis/data_peeker.py","file_name":"data_peeker.py","file_ext":"py","file_size_in_byte":10825,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"16"} +{"seq_id":"72570211849","text":"import pytest\nfrom flask import url_for\n\n\n@pytest.mark.usefixtures('db')\nclass TestOrder:\n\n def test_get_checkout(self, testapp):\n resp = testapp.get(url_for('order.checkout'))\n assert resp.status_code == 302\n\n @pytest.mark.skip(reason=\"not working\")\n def test_get_complete(user, client):\n client.login_user()\n resp = client.get(url_for('order.complete'))\n assert resp.status_code == 200\n\n @pytest.mark.skip(reason=\"not working\")\n def test_get_complete(user, client):\n client.login_user()\n resp = client.post(url_for('order.charge'))\n print(dir(resp))\n assert resp.status_code == 200\n","repo_name":"maikeulb/stationaryshop","sub_path":"src/tests/test_order.py","file_name":"test_order.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"15527331168","text":"import random\n\n\nMIN_PROBLEM_NUMB, MAX_PROBLEM_NUMB = 1, 100\nGAMES_QUESTION = 'Answer \"yes\" if the number is even, otherwise answer \"no\".'\n\n\ndef is_number_even(number):\n return number % 2 == 0\n\n\ndef make_problem_with_solution():\n problem = random.randint(MIN_PROBLEM_NUMB, MAX_PROBLEM_NUMB)\n correct_answer = 'no'\n if is_number_even(problem):\n correct_answer = 'yes'\n return problem, correct_answer\n","repo_name":"oticko/python-project-lvl1","sub_path":"brain_games/games/brain_even.py","file_name":"brain_even.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31508048205","text":"#!/usr/bin/env python\n\n\"\"\"\nThe primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes and concatenating them in any order the result will always be prime. For example, taking 7 and 109, both 7109 and 1097 are prime. 
The sum of these four primes, 792, represents the lowest sum for a set of four primes with this property.\n\nFind the lowest sum for a set of five primes for which any two primes concatenate to produce another prime.\n\"\"\"\n\nfrom eulerfunctions import generate_primes, isPrime\n\nimport itertools as it\n\ndef concat_is_p(a,b,hm):\n if (a,b) in hm or (isPrime(int(str(a)+str(b))) and isPrime(int(str(b)+str(a)))):\n hm[(a,b)] = True\n return True\n return False\n\ndef check_numbers(ai,bi,ci,di,ei, p, dic):\n a,b,c,d,e = p[ai], p[bi], p[ci], p[di], p[ei]\n for (x,y) in [(a,b),(a,c),(a,d),(a,e),(b,c),(b,d),(b,e),(c,d),(c,e),(d,e)]:\n if concat_is_p(a,b,dic) == False:\n return ai,bi+1,ci,di,ei \n if concat_is_p(a,c,dic) == False or concat_is_p(b,c,dic) == False:\n return ai,bi,ci+1,di,ei \n if concat_is_p(a,d,dic) == False or concat_is_p(b,d,dic) == False or concat_is_p(c,d,dic)==False:\n return ai,bi,ci,di+1,ei \n if concat_is_p(a,e,dic) == False or concat_is_p(b,e,dic) == False or concat_is_p(c,e,dic)==False or concat_is_p(d,e,dic)==False:\n return ai,bi,ci,di,ei \n else: \n return -1,0,0,0,0\n \ndef adjust_counter(ind, lim):\n for i in range(len(ind)-1):\n if ind[i+1]<=ind[i]:\n ind[i+1] = ind[i]+1\n if ind[0]>=lim: \n raise ValueError\n \n return ind \n\ndef increment_counter(ind, lim):\n \n def aux(it,prev):\n if len(it)==1:\n return [it[0]+1]\n if len(it)==0:\n return []\n elif it[-1]+1 from %s\" % (channel, thread_link, subject, author)\n request_data = {\n \"token\" : self.slack_bot_auth_token,\n \"channel\" : self.channel,\n \"text\" : text\n }\n requests.post(\"https://slack.com/api/chat.postMessage\",\\\n data = request_data)\n def follow_up_notification(self, thread):\n channel = \"#\"+thread[\"channel\"]\n reply = thread[\"replies\"][-1]\n name = reply[\"name\"]\n subject = reply[\"thread_subject\"]\n thread_link = thread[\"thread_link\"]\n text = \"*%s*\\n%s replied to <%s|%s>\" % (channel, name, thread_link, subject)\n request_data = {\n \"token\" : self.slack_bot_auth_token,\n \"channel\" : self.channel,\n \"text\" : text\n }\n requests.post(\"https://slack.com/api/chat.postMessage\",\\\n data = request_data)\n\n def get_notification_timestamp_channel_id(self, thread):\n thread_link = thread[\"thread_link\"]\n text = \"in:#%s from:@%s '%s'\" % (self.channel, self.username, thread_link)\n search_JSON = {\n \"token\" : self.slack_user_auth_token,\n \"query\" : text,\n \"count\" : \"1\",\n \"sort\" : \"timestamp\",\n \"sort_dir\" : \"asc\"\n }\n result = requests.get(\"https://slack.com/api/search.messages\",\\\n params = search_JSON)\n result = result.json()\n if result['messages'][\"matches\"]:\n timestamp = result['messages'][\"matches\"][0][\"ts\"]\n channel_id = result['messages'][\"matches\"][0][\"channel\"]\n channel_id = channel_id[\"id\"]\n return timestamp, channel_id\n else:\n raise BaseException(\"Message timestamp not found: %s from %s\" %\\\n (thread[\"subject\"], thread[\"author\"]))\n\n def remove_checkbox_react(self, timestamp, channel_id):\n request_data = {\n \"token\" : self.slack_bot_auth_token,\n \"name\" : \"white_check_mark\",\n \"channel\" : channel_id,\n \"timestamp\" : timestamp\n }\n requests.post(\"https://slack.com/api/reactions.remove\", data = request_data)\n\n def add_checkbox_react(self, timestamp, channel_id):\n request_data = {\n \"token\" : self.slack_bot_auth_token,\n \"name\" : \"white_check_mark\",\n \"channel\" : channel_id,\n \"timestamp\" : timestamp\n }\n requests.post(\"https://slack.com/api/reactions.add\", data = request_data)\n\n def 
add_eyes_react(self, timestamp, channel_id):\n request_data = {\n \"token\" : self.slack_bot_auth_token,\n \"name\" : \"eyes\",\n \"channel\" : channel_id,\n \"timestamp\" : timestamp\n }\n requests.post(\"https://slack.com/api/reactions.add\", data = request_data)\n\n def remove_eyes_react(self, timestamp, channel_id):\n request_data = {\n \"token\" : self.slack_bot_auth_token,\n \"name\" : \"eyes\",\n \"channel\" : channel_id,\n \"timestamp\" : timestamp\n }\n requests.post(\"https://slack.com/api/reactions.remove\", data = request_data)\n\n def watching_detection(self):\n for key in self.seen_posts:\n upvotes = int(self.seen_posts[key][\"upvotes\"])\n timestamp, channel_id = self.get_notification_timestamp_channel_id(self.seen_posts[key])\n if upvotes > 0:\n self.add_eyes_react(timestamp, channel_id)\n else:\n self.remove_eyes_react(timestamp, channel_id)\n\n\n def resolved_detection(self):\n for key in self.known_posts:\n if key not in self.seen_posts:\n #Previously unresolved Question is now resolved\n timestamp, channel_id = self.get_notification_timestamp_channel_id(self.known_posts[key])\n self.add_checkbox_react(timestamp, channel_id)\n self.remove_eyes_react(timestamp, channel_id)\n\n def unresolved_detection(self):\n for key in self.seen_posts:\n if not (key in self.known_posts):\n if self.seen_posts[key][\"replies\"]:\n try:\n timestamp, channel_id = self.get_notification_timestamp_channel_id(self.seen_posts[key])\n self.remove_checkbox_react(timestamp, channel_id)\n self.follow_up_notification(self.seen_posts[key])\n except:\n self.new_thread_notification(self.seen_posts[key])\n else:\n self.new_thread_notification(self.seen_posts[key])\n else:\n replies_seen = len(self.seen_posts[key][\"replies\"])\n replies_known = len(self.known_posts[key][\"replies\"])\n if replies_seen != replies_known:\n timestamp, channel_id = self.get_notification_timestamp_channel_id(self.seen_posts[key])\n self.remove_checkbox_react(timestamp, channel_id)\n self.follow_up_notification(self.seen_posts[key])\n\n def check_for_updates(self):\n self.get_known_posts()\n self.get_unresolved_posts()\n\n if not self.seen_posts:\n self.resolved_detection()\n self.set_known_posts()\n self.driver.quit()\n else:\n if not self.known_posts:\n self.unresolved_detection()\n self.set_known_posts()\n try:\n self.watching_detection()\n except: pass\n else:\n self.unresolved_detection()\n self.resolved_detection()\n self.set_known_posts()\n try:\n self.watching_detection()\n except: pass\n\n def run(self):\n try:\n print(\"Navigating through login screen...\", end=\"\")\n self.navigate_through_login()\n print(\"passed.\")\n print(\"Navigating through community card screen...\", end = \"\")\n self.navigate_through_community_card_options()\n print(\"passed.\")\n print(\"Checking for updates...\", end=\"\")\n self.check_for_updates()\n print(\"passed.\")\n print(\"Quiting driver...\", end=\"\")\n self.driver.quit()\n print(\"done.\")\n print(\"Bye!\")\n except Exception as e:\n print(\"failed.\")\n print(\"Quiting driver...\", end=\"\")\n self.driver.quit()\n print(\"done.\")\n print(\"Bye!\")\n raise\n\n\n\n\nclass Thread:\n def __init__(self, t, driver, channel, channel_id):\n self.channel = channel\n self.channel_id = channel_id\n self.get_attributes(t, driver)\n\n def get_thread_link(self, t, driver):\n #Get query parameters\n c = '99224b2f-9072-46b6-b442-c0c3a785439b'\n thread_id = t.get_attribute(\"id\")\n thread_id = thread_id[thread_id.find(\":\") + 1 : ]\n #Construct final thread_link\n thread_link = 
\"https://platform.parthean.com/Home?c=%s&v=%s&t=%s\" %\\\n (c, self.channel_id , thread_id)\n return thread_link\n\n def get_attributes(self, t, driver):\n #Author\n self.author = t.find_element_by_class_name(\"header-left-side\")\\\n .find_elements_by_tag_name(\"span\")[0]\\\n .get_attribute(\"textContent\")\n #Subject\n self.subject = t.find_element_by_class_name(\"subject\")\\\n .find_element_by_tag_name(\"h2\")\\\n .get_attribute(\"textContent\")\n #Original Post\n self.message = t.find_element_by_id(\"firstMessageText\")\\\n .find_element_by_tag_name(\"p\")\\\n .get_attribute(\"textContent\")\n #Replies\n self.replies = self.get_replies(t, driver, self.channel)\n #Upvotes\n self.upvotes = t.find_element_by_id(\"threadReplies\")\\\n .find_element_by_class_name(\"options-toolbar-item\")\\\n .find_elements_by_tag_name(\"span\")[1]\\\n .get_attribute(\"textContent\")\n #Thread Link\n self.thread_link = self.get_thread_link(t, driver)\n\n def get_hashables(self):\n return (self.author, self.subject,self.message, self.channel)\n\n def __repr__(self):\n return \"%s,%s,%s,%s\" % self.get_hashables()\n\n def __hash__(self):\n hl = md5()\n hl.update(str(self).encode(\"utf-8\"))\n return int(hl.hexdigest(), 16)\n\n def __eq__(self,other):\n return (isinstance(other, Thread)) and\\\n (self.author == other.author) and\\\n (self.subject == other.subject) and\\\n (self.message == other.message) and\\\n (self.replies == other.replies)\n\n #Returns a tuple containing a thread's replies\n def get_replies(self, t, driver, channel):\n #Find replies button\n replies_button = t.find_element_by_id(\"threadReplies\")\\\n .find_element_by_css_selector('button.back-button.replies-button')\n #Click the replies button\n replies_button.click()\n #Read the list of replies\n replies = t.find_element_by_id(\"threadReplies\")\\\n .find_element_by_id(\"messageList\")\\\n .find_elements_by_css_selector(\"div.bodyArea.replyWrapper\")\n replies_list = list()\n for r in replies:\n reply = Reply(r, t, channel)\n replies_list.append(reply.__dict__)\n return replies_list\n\nclass Reply:\n def __init__(self, r, t, channel):\n self.channel = channel\n self.name, self.message, \\\n self.thread_subject = self.get_attributes(r,t)\n\n def get_attributes(self, r, t):\n #Extract the reply info\n name = r.find_element_by_class_name(\"message-header\")\\\n .find_element_by_tag_name(\"p\").get_attribute(\"textContent\")\n message = r.find_element_by_class_name(\"message-body\")\\\n .find_element_by_tag_name(\"p\").get_attribute(\"textContent\")\n thread_subject = t.find_element_by_class_name(\"subject\")\\\n .find_element_by_tag_name(\"h2\")\\\n .get_attribute(\"textContent\")\n return name, message, thread_subject\n\n def get_hashables(self):\n return(self.name, self.message)\n\n def __repr__(self):\n return \"%s, %s, %s\" % self.get_hashables()\n \n def __hash__(self):\n hl = md5()\n hl.update(self.name.encode('utf-8'))\n hl.update(self.message.encode('utf-8'))\n return int(hl.hexdigest(), 16)\n\n def __eq__(self, other):\n return (isinstance(other, Reply)) and\\\n (self.name == other.name) and\\\n (self.message == other.message)\n","repo_name":"aceamarco/Parthean-Notifications-Slackbot-Public","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72500717129","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# author zym\n\nfrom confluence import Confluence\nfrom runoff_generation 
import Runoff,dt_range\nfrom utils import *\n\nfrom numba import jit\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nfrom bayes_opt import BayesianOptimization\n\nnrows = 38\nncols = 35\ngrid_nums = nrows * ncols\n\n# tensor\n\n# 日蒸发数据\nday_evap = np.loadtxt(r'F:\\我的论文\\python程序\\数据预处理\\蒸发数据处理\\day_evap.txt')\n# 降雨数据\ngrid_rain = np.loadtxt(r'F:\\我的论文\\python程序\\数据预处理\\3000数据\\网格雨量数据计算\\grid_rain.txt')\n# 读取真实流量数据\nreal_q = np.loadtxt(r'F:\\我的论文\\python程序\\数据预处理\\真实流量数据\\真实流量数据.txt')\n\n# 汇水栅格标记数据\ngrid_order = np.loadtxt(\n r'F:\\我的论文\\python程序\\数据预处理\\3000数据\\汇水栅格统计\\汇水栅格次序.txt',\n dtype=np.int)\ngrid_order = grid_order.flatten() # 按格子数展平数据\n# 河道栅格标记数据\ngrid_river = np.loadtxt(\n r'F:\\我的论文\\python程序\\数据预处理\\3000数据\\riverway.txt',\n dtype=np.int)\nrivers = np.argwhere(grid_river == 1)\nriver_mark_list = [[rivers[i, 0], rivers[i, 1]]\n for i in range(rivers.shape[0])]\n# 栅格流向数据,建立递归关系\nfdr = np.loadtxt(r'F:\\我的论文\\python程序\\数据预处理\\3000数据\\fdr3000.txt', skiprows=6)\n# 栅格汇水累���量数据,估算河道宽度\nfac = np.loadtxt(r'F:\\我的论文\\python程序\\数据预处理\\3000数据\\fac3000.txt',skiprows=6)\n\n\n# flow\n\nclass Distribute_predict():\n\n def __init__(self,wm, kc, ki, kg, b,a3, b3, a4, b4, cs, ci, cg,dt=24, dx=3):\n # 产流参数\n self.WM = wm # 土壤蓄水容量\n self.KC = kc # 蒸散发折算系数\n self.KI = ki # 壤中径流出流系数\n self.KG = kg # 地下径流出流系数\n self.B = b # 反映坡度对出流的影响\n\n # 汇流参数\n # 波速计算的经验参数\n self.a3 = a3\n self.b3 = b3\n # 河道宽度计算的经验参数\n self.a4 = a4\n self.b4 = b4\n # 地表水线性水库汇流系数 todo\n self.CS = cs\n # 壤中流线性水库退水系数\n self.CI = ci\n # 地下水线型水库退水系数\n self.CG = cg\n\n # 时间步长、空间步长\n self.dt = dt\n self.dx = dx\n # 单位转换系数\n self.U = 3 * 3 / (3.6 * 24)\n # 出口位置\n self.output_i = 29\n self.output_j = 7\n\n # 模型产流计算\n @jit\n def _distributed_runoff(self):\n runoff = Runoff(self.WM, self.KC, self.KI, self.KG, self.B)\n runoff_result = []\n for grid_id in range(grid_nums):\n i,j = id_toij(grid_id)\n if fac[i,j] >= 0:\n r_all = runoff.eva_runoff(grid_id, grid_rain[grid_id, :], day_evap)\n runoff_result.append(r_all)\n else:\n runoff_result.append(-9999) # 对于非汇水栅格标记为-9999,表示NoData\n\n return runoff_result\n\n # 模型汇流计算\n def distributed_confluence(self,output_ij_list):\n\n # 获取产流数据\n grid_runoff = self._distributed_runoff()\n\n # 汇流计算实例化\n confluence = Confluence(self.dt, self.dx, self.a3, self.b3, self.a4, self.b4, self.CS, self.CI, self.CG)\n\n # 河道栅格汇流计算\n\n def river_flow(i, j):\n k, grid_input = find_input(i, j, fdr) # 找到所有指向该河道栅格的栅格\n not_river_list = [\n item for item in grid_input if item not in river_mark_list]\n river_list = [item for item in grid_input if item in river_mark_list]\n # 线性叠加所有非河道栅格的坡地汇流之后的过程\n R_not_river = np.zeros(day_evap.size)\n for ij in not_river_list:\n grid_id = ij_toid(ij, ncols)\n RS, RI, RG = grid_runoff[grid_id]\n # 坡面汇流之后的结果线性叠加(序列值)\n RS_slope = confluence.surface_confluence(RS)\n RI_slope = confluence.interflow_confluence(RI)\n RG_slope = confluence.underground_confluence_1(RG)\n R_not_river += (RS_slope + RI_slope + RG_slope)\n if not river_list: # 到了河道栅格的源头了\n # 该栅格本身所产生的净雨作为旁侧入流处理\n grid_id = ij_toid([i,j])\n RS,RI,RG = grid_runoff[grid_id]\n qlat = (RS + RI + RG) * self.U # 旁侧入流\n # 此时所有上游栅格线性叠加作为计算栅格的入流过程\n R = np.zeros(day_evap.size)\n for dt_id in range(1,day_evap.size):\n R[dt_id] = confluence.musking_cunge(\n R_not_river[dt_id-1],R_not_river[dt_id],R[dt_id-1],qlat[dt_id],fac[i,j])\n return R # 返回该河道栅格出流过程\n else:\n # 如果不是源头河道栅格,即该栅格上游仍有河道栅格汇入\n # 上游河道栅格的出流过程线性叠加作为计算栅格的入流过程\n R_in = np.zeros(day_evap.size)\n for ij in river_list:\n R = river_flow(ij[0],ij[1]) # 递归运算,算法精髓!!\n 
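# Reviewer note (illustrative, not from the original code): flow directions make the\n                    # upstream grid a tree, but when one requested outlet drains through another,\n                    # river_flow() recomputes the shared upstream subtree from scratch. A hypothetical\n                    # memo dict keyed by (i, j) would cache each reach's hydrograph instead:\n                    #   if (ij[0], ij[1]) not in _memo:\n                    #       _memo[(ij[0], ij[1])] = river_flow(ij[0], ij[1])\n                    #   R = _memo[(ij[0], ij[1])]\n                    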
R_in += R\n # 坡面栅格的出流过程与栅格本身净雨(产流)作为旁侧入流\n grid_id = ij_toid([i,j])\n RS,RI,RG = grid_runoff[grid_id] # 本身产流\n qlat = R_not_river + (RS + RI + RG) * self.U # 旁侧入流\n R = np.zeros(day_evap.size)\n for dt_id in range(1,day_evap.size):\n R[dt_id] = confluence.musking_cunge(\n R_in[dt_id-1],R_in[dt_id],R[dt_id-1],qlat[dt_id],fac[i,j])\n return R\n\n q = np.zeros(day_evap.size)\n for ij_tuple in output_ij_list:\n q += river_flow(ij_tuple[0],ij_tuple[1])\n\n return q\n\n # # 非河道栅格汇流计算,栅格距离河道栅格最多也就五步,可以忽略 #todo\n # def overland_flow(i,j):\n # k,grid_input = find_input(i,j,fdr)\n # # 找出非河道集水栅格\n # grid_input = [item for item in grid_input if item not in river_mark_list]\n # if not grid_input: # 当此栅格为源头栅格时\n\n# 可视化\ndef q_vision(q,real_q):\n plt.rcParams['font.sans-serif'] = ['SimHei']\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y%m'))\n\n ax.set_xlabel('时间')\n ax.set_ylabel('流量' + '$\\mathrm{m}^{3}$' + '/s')\n ax.plot(dt_range,q,label='预测流量')\n ax.plot(dt_range,real_q,label='实测流量')\n ax.legend(loc=1)\n plt.show()\n\n@time_lag\ndef main(wm, kc, ki, kg, b, a3, b3, a4, b4, cs, ci, cg):\n distribute_predict = Distribute_predict(wm, kc, ki, kg, b,a3, b3, a4, b4, cs, ci, cg)\n q = distribute_predict.distributed_confluence([(29, 7), (31, 11), (28, 16), (8,31), (34,8)])\n q_vision(q,real_q)\n dc = dc_calculate(q,real_q)\n print(f'计算总径流:{q.sum()}')\n print(f'实测总径流:{real_q.sum()}')\n print(f'确定性系数dc值为:{dc}')\n\n # return dc\n\n# 贝叶斯优化调参\ndef bayes_optimize():\n bo = BayesianOptimization(main,{'wm':(100,220),\n 'kc':(0.6,1.2),\n 'ki':(0.2,0.6),\n 'kg':(0.2,0.6),\n 'b':(2,2),\n 'a3':(0.1,1),\n 'b3':(0.1,1),\n 'a4':(0.5,2),\n 'b4':(1,3),\n 'cs':(0.4,0.7),\n 'ci':(0.5,0.9),\n 'cg':(0.9,0.998)})\n bo.explore({'wm':[150],'kc':[0.6],'ki':[0.35],'kg':[0.35],'b':[2],\n 'a3':[0.5],'b3':[0.5],'a4':[1.15],'b4':[1.78],\n 'cs':[0.6],'ci':[0.75],'cg':[0.995]})\n bo.maximize(init_points=10,acq='poi')\n print(bo.res['max'])\n\nif __name__ == '__main__':\n main(150,0.6,0.35,0.35,2,0.5,0.5,1.15,1.78,0.6,0.75,0.995)\n # bayes_optimize()\n","repo_name":"zymspindrift/distributed_model","sub_path":"distributed_model/pipline.py","file_name":"pipline.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38786303005","text":"n = int(input())\na = list(map(int, input().split()))\n\na.sort(reverse=True)\neven = []\nfor i in range(n):\n if a[i] % 2 == 0:\n even.append(a[i])\n\nthree = 0\nfor i in range(len(even)):\n sub = even[i]\n while sub % 2 == 0:\n three += 1\n sub = sub / 2\n\nprint(three)","repo_name":"RuRey0310/Competitive_Programming","sub_path":"ABC100~150/ABC100/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72849911369","text":"from collections import Counter\n\ninput_ = \"./data.txt\"\n\n\ndef getMostCommon(filename):\n columns = None\n with open(input_, \"r\") as file:\n for tot, line in enumerate(file):\n l = line.strip()\n if columns is None:\n columns = [\"\"] * len(l)\n for i, char in enumerate(l):\n columns[i] += char\n counters = [Counter(l) for l in columns]\n return [int(c[\"1\"] > tot / 2) for c in counters]\n\n\ndef to_decimal(bits):\n return int(\"\".join(str(b) for b in bits), 2)\n\n\ndef main():\n gamma = getMostCommon(input_)\n epsilon = [1 - x for x in gamma]\n\n print(to_decimal(epsilon) * to_decimal(gamma))\n\n\nif 
__name__ == \"__main__\":\n main()\n","repo_name":"lucblassel/AoC_2021","sub_path":"3/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73558779848","text":"import heapq\n\ndef solution(operations):\n answer = []\n min_heap = []\n max_heap = []\n for op in operations:\n [command, n_str] = op.split(\" \")\n n = int(n_str)\n if command == \"I\":\n heapq.heappush(min_heap, n)\n heapq.heappush(max_heap, -n)\n elif command == \"D\" and n == 1:\n if len(max_heap) < 1:\n continue\n heapq.heappop(max_heap)\n min_heap = []\n for ele in max_heap:\n heapq.heappush(min_heap, -ele)\n elif command == \"D\" and n == -1:\n if len(min_heap) < 1:\n continue\n heapq.heappop(min_heap)\n max_heap = []\n for ele in min_heap:\n heapq.heappush(max_heap, -ele)\n answer = [0,0] if len(min_heap) < 1 else [-(heapq.heappop(max_heap)), heapq.heappop(min_heap)]\n \n return answer\n\ndef solution2(operations):\n answer = []\n max_heap = []\n min_heap = []\n for operation in operations:\n # print(operation)\n command, n = operation.split(\" \")\n n = int(n)\n if command == \"I\":\n heapq.heappush(min_heap, n)\n heapq.heappush(max_heap, -n)\n elif command == \"D\":\n if len(max_heap) < 1:\n continue\n if n == 1:\n heapq.heappop(max_heap)\n target = min_heap\n source_heap = max_heap\n else:\n heapq.heappop(min_heap)\n target = max_heap\n source_heap = min_heap\n target.clear()\n for ele in source_heap:\n heapq.heappush(target, -ele)\n # print(min_heap)\n # print(max_heap)\n if len(max_heap) < 1:\n return [0, 0]\n answer.append(heapq.heappop(max_heap) * -1)\n answer.append(heapq.heappop(min_heap))\n return answer\n\n# [0,0]\nprint(solution([\"I 16\", \"I -5643\", \"D -1\", \"D 1\", \"D 1\", \"I 123\", \"D -1\"]))\n\n# [333, -45]\nprint(\n solution([\n \"I -45\", \"I 653\", \"D 1\", \"I -642\", \"I 45\", \"I 97\", \"D 1\", \"D -1\",\n \"I 333\"\n ]))\n\n# [0,0]\nprint(solution2([\"I 16\", \"I -5643\", \"D -1\", \"D 1\", \"D 1\", \"I 123\", \"D -1\"]))\n\n# [333, -45]\nprint(\n solution2([\n \"I -45\", \"I 653\", \"D 1\", \"I -642\", \"I 45\", \"I 97\", \"D 1\", \"D -1\",\n \"I 333\"\n ]))","repo_name":"jayhyun-hwang/jhLeetCode","sub_path":"python-code/programmers/L3/이중우선순위큐.py","file_name":"이중우선순위큐.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13084808390","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/1845\n\ndef solution(nums):\n\n list1 = [-1]\n\n for n in nums:\n for s in range(len(list1)):\n if n == list1[s]:\n break\n elif (s == len(list1) - 1):\n list1.append(n)\n\n answer = 0\n\n if (len(list1) - 1 < len(nums) / 2):\n answer = len(list1) - 1\n else:\n answer = len(nums) / 2\n return answer\n\n\n# hash는 set을 이용하는것이 키포인트\ndef bestSolution(nums):\n print(set(nums))\n return min(len(nums)/2, len(set(nums)))\n\n\na = [3, 1, 2, 3]\nprint(bestSolution(a))\n","repo_name":"EEDK/2020-2-INUCS-Algorithm","sub_path":"Problems/hash/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34922010594","text":"import os\nimport json\nimport datetime\nimport requests\nfrom typing import List, Dict\nfrom load_env import load_environ\n\nload_environ()\n\nbase_url = \"https://hotels4.p.rapidapi.com\"\n\nheaders = {\n \"X-RapidAPI-Key\": 
os.environ.get('RAPIDAPI_KEY'),\n \"X-RapidAPI-Host\": \"hotels4.p.rapidapi.com\"\n}\n\n\ndef make_get_request(url: str, head: Dict, querystring: Dict) -> requests.Response:\n try:\n return requests.request('GET', url=url, headers=head, params=querystring, timeout=10)\n except requests.exceptions.ReadTimeout as e:\n print(e)\n\n\ndef get_landmark_destination_id(city: str) -> int | bool:\n url = base_url + '/locations/v2/search'\n querystring = {\"query\": city}\n\n response = make_get_request(url=url, head=headers, querystring=querystring)\n\n if response and response.status_code == 200:\n body = json.loads(response.text)\n try:\n return body['suggestions'][0]['entities'][0]['destinationId']\n except IndexError as e:\n print(e)\n except KeyError as e:\n print(e)\n return False\n\n\ndef get_hotel(\n destination_id: int,\n count: int,\n sort_hotel: str,\n p_range: str,\n d_range: str,\n check_in: datetime.date,\n check_out: datetime.date\n) -> List[Dict] | bool:\n\n dict_of_transform_sort = {\n 'ASC': 'PRICE',\n 'DESC': 'PRICE_HIGHEST_FIRST'\n }\n\n url = base_url + '/properties/list'\n\n if check_in is None:\n check_in = datetime.date.today()\n if check_out is None:\n check_out = datetime.date.today()\n\n querystring = {\n \"destinationId\": destination_id,\n \"pageNumber\": \"1\",\n \"pageSize\": \"25\",\n \"checkIn\": check_in,\n \"checkOut\": check_out,\n \"adults1\": \"1\",\n \"sortOrder\": dict_of_transform_sort[sort_hotel],\n \"locale\": \"en_US\",\n \"currency\": \"USD\"\n }\n if p_range:\n start_price, stop_price = p_range.split('-')\n querystring = {\n \"destinationId\": destination_id,\n \"pageNumber\": \"1\",\n \"pageSize\": \"25\",\n \"checkIn\": check_in,\n \"checkOut\": check_out,\n \"adults1\": \"1\",\n \"priceMin\": start_price,\n \"priceMax\": stop_price,\n \"sortOrder\": \"DISTANCE_FROM_LANDMARK\",\n \"locale\": \"en_US\",\n \"currency\": \"USD\",\n \"landmarkIds\": \"City Center\"\n }\n\n response = make_get_request(url=url, head=headers, querystring=querystring)\n if response and response.status_code == 200:\n body = json.loads(response.text)\n results = body['data']['body']['searchResults']['results']\n\n data_to_ret = []\n try:\n if not d_range:\n for i_res in results[:count]:\n data_to_ret.append({\n 'id': f\"{i_res['id']}\",\n 'name': f\"{i_res['name']}\",\n 'address': f\"{i_res['address']['locality']} {i_res['address']['streetAddress']}\",\n 'distance_to_the_center': f\"{i_res['landmarks'][0]['distance']}\",\n 'price': f\"{i_res['ratePlan']['price']['current']}\",\n })\n else:\n i = 0\n start_dist, stop_dist = d_range.split('-')\n while len(data_to_ret) < count:\n if float(start_dist) < float(results[i]['landmarks'][0]['distance'].split(' ')[0]) < float(stop_dist):\n data_to_ret.append({\n 'id': f\"{results[i]['id']}\",\n 'name': f\"{results[i]['name']}\",\n 'address': f\"{results[i]['address']['locality']} {results[i]['address']['streetAddress']}\",\n 'distance_to_the_center': f\"{results[i]['landmarks'][0]['distance']}\",\n 'price': f\"{results[i]['ratePlan']['price']['current']}\"\n })\n i += 1\n return data_to_ret\n except IndexError as e:\n print(e)\n except KeyError as e:\n print(e)\n return False\n\n\ndef get_hotel_photos(hotels: Dict, count_photos: int) -> List[str] | bool:\n url = base_url + '/properties/get-hotel-photos'\n querystring = {\"id\": hotels['id']}\n\n response = make_get_request(url=url, head=headers, querystring=querystring)\n if response and response.status_code == 200:\n body = json.loads(response.text)\n photos_list = []\n for i in 
range(count_photos):\n photos_list.append(body['hotelImages'][i]['baseUrl'])\n return photos_list\n return False\n","repo_name":"daniil49926/API_Telegram_bot","sub_path":"api_logic.py","file_name":"api_logic.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44410826693","text":"import urllib.parse as urlparse\nfrom urllib.parse import parse_qs\nimport requests\n\ndef find_revid(url):\n parsed = urlparse.urlparse(url)\n q = (parse_qs(parsed.query)['oldid'])\n if True:\n print(q)\n else:\n raise KeyError('invalid url')\n\nfind_revid('https://en.wikipedia.org/w/index.php?oldid=935784560')\n\n\ndef revid_to_qid(revid, lang):\n lang = \"en\"\n S = requests.Session()\n\n URL = \"https://en.wikipedia.org/w/api.php\"\n\n PARAMS = {\n \"action\": \"query\",\n \"prop\": \"pageprops\",\n \"revids\": \"revid\",\n \"format\": \"json\"\n }\n\n R = S.get(url=URL, params=PARAMS)\n DATA = R.json()\n\n qid = DATA[\"query\"][\"pages\"][\"29828568\"][\"pageprops\"][\"wikibase_item\"]\n jprint(qid)\n\n\nrevid_to_qid(935784560, \"en\")","repo_name":"iammyvu/wikipedia-project","sub_path":"find_revid.py","file_name":"find_revid.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42371771909","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Σε αυτό το στάδιο εξερευνούμε τα δεδομένα έτσι ώστε να τα \"γνωρίσουμε καλύτερα\".\n# Τα ερωτήματα που θα απαντηθούν από τα δεδομένα σε αυτό το στάδιο:\n# * Ποιοι είναι οι χρήστες με τους πιο πολλούς ακολούθους;\n# * Σε ποια περιοχή διαμένουν οι περισσότεροι χρήστες;\n# * Ποιες είναι οι πιο συνηθισμένες συσκευές που επιλέγουν οι χρήστες;\n# * Πως διακυμένονται τα tweets σύμφωνα με την ημερομηνία; Ποια είναι η μέρα με τις πιο πολλές δημοσιεύσεις;\n# * Τι ποσοστό των tweets περιέχουν link, hashtag, mention;\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re\n\ndf = pd.read_csv('data/final.csv')\ndf.head()\nprint(\"Number of tweets: {}\".format(len(df)))\n\n#Visualize the Source column percentages.\ncolors = ['#93baf5','#6081b5', '#cbf7e6', '#5db0d4', '#7fa0fa']\np = df.Source.value_counts().head(5).plot.pie(x='lab', y='val', autopct='%1.1f%%', rot=2, colors=colors, figsize=(15,10));\np.set_title(\"Σηνυθισμένες συσκευές που χρησιμοποιούν οι Έλληνες του Twitter\");\n\ntweet_id = 222\ntweet = df.iloc[tweet_id]\nprint(\"Tweet: {}\".format(tweet[\"Tweet\"]))\n\n# Με τη συνάρτηση df.groupby().mean().nlargest() εντοπίζουμε τους χρήστες \n# με τους περισσότερους ακολούθους και τους κατατάσουμε με το όνομα χρήστη τους.\n\nmost_followers = df.groupby('Username')['Followers'].mean().nlargest(15)\nmost_followers\n\nax = most_followers.plot(kind='barh', figsize=(10, 12), color='#b4aeeb', zorder=2, width=0.85)\nplt.gca().invert_yaxis()\nsns.despine(bottom = True, left = True)\nplt.ylabel(None)\nplt.xticks(None)\nplt.xticks([])\nplt.yticks(fontsize=18, rotation=0)\n\nfor index, value in enumerate(most_followers):\n plt.text( value, index, str(round(value, 2)), va = 'center', ha='left', fontsize=16)\n \nplt.suptitle('Οι χρήστες με τους περισσότερους followers'.title(), fontsize=20)\nplt.show()\n\nmost_tweets_users = df.Username.value_counts().reset_index()\nmost_tweets_users.columns = ['Username','counts']\nmost_tweets_users.head(20)\n\nusers = df['Username'].apply(pd.Series).stack().value_counts().head(10)\nax = 
users.plot(kind='barh', figsize=(10, 12), zorder=2, width=0.85)\nplt.gca().invert_yaxis()\nsns.despine(bottom = True, left = True)\nplt.ylabel(None)\nplt.xticks(None)\nplt.xticks([])\nplt.yticks(fontsize=18, rotation=0)\n\nfor index, value in enumerate(users):\n plt.text( value, index, str(round(value, 2)), va = 'center', ha='left', fontsize=16)\n \nplt.suptitle('Οι χρήστες που εμφανίζονται πιο πολύ στο αρχείο δεδομένων'.title(), fontsize=15)\nplt.show()\n\n\n# Βρίσκουμε τη περιοχή από την οποία δημοσιεύτηκαν τα περισσότερα tweets.\nmost_tweets_users = df.Location.value_counts().reset_index()\nmost_tweets_users.columns = ['Location','counts']\nmost_tweets_users.head(20)\n\ncolors = ['#93baf5','#d5abd9', '#cbf7e6', '#5db0d4', '#ebcaea','#a2ebe1','#a9aade']\n\np = df.Location.value_counts().head(7).plot.pie(x='Location', autopct='%1.1f%%', startangle=90, \n rot=2, colors=colors, figsize=(14,10));\np.set_title(\"Location pie chart\");\nplt.ylabel(None);\n\ndate_counts = df[['Tweet', 'Date']].groupby(['Date']).count().reset_index()\nmost_tweets = date_counts.groupby('Date')['Tweet'].mean().nlargest(20).reset_index()\nmost_tweets.columns = ['date','count']\nmost_tweets.head(20)\n\nimport plotly.express as px\npx.line(most_tweets, x = 'date', y = 'count', title = 'Tweet counts per day lineplot')\n\n\n# Υπολογίζουμε τα Tweets που περιέχουν hashtag, το οποίο συμβολίζεται με '#'.\ntweets_with_hashtag = df[df['Tweet'].str.contains('#')==True]\nprint(\"Ο αριθμός των tweets που περιέχουν hashtag: {}\".format(len(tweets_with_hashtag)))\n\n\n# Με ανάλογο τρόπο μπορούμε να εντοπίσουμε και τα tweets που δεν περιέχουν hashtag σε μια συλλογή με tweets.\ntweets_without_hashtag = df[df['Tweet'].str.contains('#')==False]\nprint(\"Ο αριθμός των tweets που δεν περιέχουν hashtag: {}\".format(len(tweets_without_hashtag)))\n\n#Save the file\nhastag_frame = pd.read_csv('HASHTAG.csv')\n\nhashtag = hastag_frame.has_hashtag.value_counts().plot.pie(x='lab', y='val', autopct='%1.1f%%', rot=2,figsize=(12,8));\nplt.suptitle('Ποσοστό δημοσιεύσεων με hashtag'.title(), fontsize=20);\n\n\n# Υπολογίζουμε τα tweets που περιέχουν link και στη συνέχεια βλέπουμε με την εντολή sum() τον αριθμό τους.\ntweets_with_url = df[df['Tweet'].str.contains('http')==True]\nprint(\"Ο αριθμός των tweets που περιέχουν link: {}\".format(len(tweets_with_url)))\n\n# Επιβεβαιώνουμε την ύπαρξη link σε κάποιες από τις εγγραφές\ntweets_with_url['Tweet'][37756]\n# Υπολογίζουμε τα tweets που δεν περιέχουν link και στη συνέχεια βλέπουμε με την εντολή sum() τον αριθμό τους.\ntweets_without_url = df[df['Tweet'].str.contains('http')==False]\nprint(\"Ο αριθμός των tweets που δεν περιέχουν link: {}\".format(len(tweets_without_url)))\n\nurl_frame = pd.read_csv('url_frame.csv')\ntag = url_frame.has_url.value_counts().head(5).plot.pie(x='lab', y='val', autopct='%1.1f%%', rot=2,figsize=(12,8));\nplt.suptitle('Ποσοστό δημοσιεύσεων με link'.title(), fontsize=20);\n\n\n# Υπολογίζουμε τα tweets που περιέχουν αναφορά (mention) η οποία στο Twitter συμβολίζεται με @.\ntweets_with_mention = df[df['Tweet'].str.contains('@')== True]\nprint(\"Ο αριθμός των tweets που περιέχουν mention: {}\".format(len(tweets_with_mention)))\n\ntweets_without_mention = df[df['Tweet'].str.contains('@')== False]\nprint(\"Ο αριθμός των tweets που δεν περιέχουν mention: {}\".format(len(tweets_without_mention)))\n\nmention_frame = pd.read_csv('MENTIONS.csv')\n\ntag = mention_frame.has_mention.value_counts().head(5).plot.pie(x='lab', y='val', autopct='%1.1f%%', 
rot=2,figsize=(12,8));\nplt.suptitle('Ποσοστό δημοσιεύσεων με mention'.title(), fontsize=20);\n\ntweets_with_emojis = df[df['Tweet'].str.contains('😂')== True]\nprint(\"Ο αριθμός των tweets που περιέχουν το emoji: {}\".format(len(tweets_with_emojis)))\n\ntweets_with_emojis['Tweet'][198]\n\n# Υπολογίζουμε τα tweets που είναι retweets και όχι αυτούσια. Στο twitter αυτό συμβολίζεται με RT\n\nretweets = df[df['Tweet'].str.startswith('RT')== True]\nprint(\"Ο αριθμός των retweets: {}\".format(len(retweets)))\n\nretweets = pd.read_csv('Retweeted.csv')\n\nno_retweets = df[df['Tweet'].str.startswith('RT')== False]\nprint(\"Ο αριθμός των tweets που δεν είναι retweet: {}\".format(len(no_retweets)))\n\nretweets_plot = retweets.is_retweet.value_counts().head().plot.pie(x='lab', y='val', autopct='%1.1f%%', rot=2,figsize=(12,8));\nplt.suptitle('Ποσοστό δημοσιεύσεων που είναι retweets'.title(), fontsize=20);\n\n","repo_name":"Minakoaino/Sentiment-Analysis-Greek-tweets","sub_path":"python_files/1.Analyse_tweets.py","file_name":"1.Analyse_tweets.py","file_ext":"py","file_size_in_byte":7797,"program_lang":"python","lang":"el","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"45175795440","text":"person = input('Enter your name: ')\nprint(\"Hello \"+ person + \". Welcome to the Number Guessing game!\")\n\ninput_var = None\n\nwhile type(input_var) is not int:\n input_var = input (\"Please choose the lower end of your range of numbers: \")\n if input_var.isdigit():\n input_var=int(input_var)\n else:\n print(\"Input is not valid. Please enter an integer\")\n\ninput_var2 = None\n\nwhile type(input_var2) is not int:\n\tinput_var2 = input(\"Now, please choose the higher end of your range of numbers: \")\n\tif input_var2.isdigit():\n\t input_var2=int(input_var2)\n\telse:\n\t print(\"Input is not valid. Please enter an integer\")\n\nprint (\"You set a range of\", int(input_var), \"and\", int(input_var2))\n\nimport random\nnumber= random.randrange(int(input_var), int(input_var2)) \n\nprint (\"I've picked a number from the range that you've chosen. Which number did I pick?\")\n\nguess = None\nnumber = int(number)\n\nwhile type(guess) is not int:\n guess = input('Press any number to continue')\n if guess.isdigit():\n guess=int(guess)\n else:\n print(\"Error, only integers allowed. Please try again.\")\n while type(guess) is int:\n guess = int(input(\"My guess is: \"))\n if (int(guess) < number): \n print (\"Incorrect. The number is higher\")\n if (int(guess) > number): \n print (\"Incorrect. The number is lower\")\n if (int(guess) == number): \n print(\"Congratulations! 
You guessed the number\")","repo_name":"xiaoshel/SheHacks","sub_path":"Challenge1.py","file_name":"Challenge1.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24589061352","text":"from rest_framework import serializers\nfrom .models import Category, Product\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n exclude = ('place',)\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n cover = serializers.ImageField(required=False)\n\n class Meta:\n model = Product\n exclude = ('category',)\n","repo_name":"akbarnurullaev/places-backend","sub_path":"products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"31333407927","text":"# coding:utf-8\n\nimport os\nimport scipy.io as sio\nimport numpy\nimport Image\n\n\ndef function(dirname, filename, matname):\n record = {}\n s = \"23456789ABCDEFGHJKLMNPQRSTUVWXYZ\"\n for dir in os.listdir(dirname):\n if os.path.isdir(dirname + dir):\n l = os.listdir(dirname + dir)\n for i in l:\n record[int(i[:-4])] = dir\n f = open(filename, \"w\")\n for i in record:\n f.write(record[i] + \"\\\\\" + str(i) + \".jpg \" + str(s.index(record[i])) + \"\\n\")\n f.close()\n\n # 以下创建mat文件\n label = numpy.zeros((0, 1), dtype=\"uint8\")\n data = numpy.zeros((0, 400), dtype=\"int\")\n f = open(filename, \"r\")\n while True:\n line = f.readline()\n if line == '':\n break\n tmp = line.split()\n img = numpy.array(Image.open(dirname + tmp[0]).convert(\"L\")).reshape(1, 400)\n data = numpy.row_stack((data, img))\n label = numpy.row_stack((label, int(tmp[1])))\n\n sio.savemat(matname, {'data': data, 'label': label})\n\n\nfunction(\"./test/\", \"./test/test.txt\", \"./test/dataTrain.mat\")\nfunction(\"./train/\", \"./train/train.txt\", \"./train/dataTrain.mat\")\n","repo_name":"Lijinging/mis.ustc","sub_path":"makeData.py","file_name":"makeData.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41894988238","text":"from typing import Tuple\nimport numpy as np\nimport pickle\nfrom .transition import Transition\n\nclass ExperienceBuffer:\n \"\"\"ExperienceBuffer stores transitions for training\"\"\"\n\n def __init__(self, observation_len: int, capacity: int):\n self._obs_tm1_buf = np.empty((capacity, observation_len), dtype=np.byte)\n self._act_tm1_buf = np.empty((capacity, 1), dtype=np.byte)\n self._obs_t_buf = np.empty((capacity, observation_len), dtype=np.byte)\n self._rew_t_buf = np.empty((capacity, 1), dtype=np.float64)\n self._terminal_t_buf = np.empty((capacity, 1), dtype=bool)\n self._sample_range = np.arange(0, capacity, dtype=np.int)\n self.capacity = capacity\n self.oldest_entry = 0\n self.size = 0\n\n def get_update_indices(self, batch_size):\n \"\"\"\n Get indices of oldest entries in buffer.\n \"\"\"\n max_entry = self.oldest_entry + batch_size\n if max_entry <= self.capacity:\n return list(range(self.oldest_entry, max_entry))\n part1 = list(range(self.oldest_entry, self.capacity)) # end of buffer\n part2 = list(range(max_entry - self.capacity)) # start of buffer\n return + part1 + part2\n\n def add_transitions(self,\n observation_tm1: np.ndarray,\n action_tm1: np.ndarray,\n reward_t: np.ndarray,\n observation_t: np.ndarray,\n terminal_t: np.ndarray):\n \"\"\"Add a 
transition to buffer.\n Args:\n observation_tm1 -- source observation. shape (batch_size, observation_len)\n action_tm1 -- action taken from source to destination state. shape (batch_size, 1)\n observation_t -- destination observation. batch of shape (batch_size, observation_len)\n reward_t -- reward for getting from source to destination state. shape (batch_size, 1)\n terminal_t -- flag showing whether the destination state is terminal. shape (batch_size, 1)\n \"\"\"\n batch_size = len(observation_tm1)\n \n # new batch is written into middle of buffer\n if self.oldest_entry + batch_size <= self.capacity:\n self._obs_tm1_buf[self.oldest_entry : self.oldest_entry + batch_size, :] = observation_tm1\n self._act_tm1_buf[self.oldest_entry : self.oldest_entry + batch_size, :] = action_tm1\n self._obs_t_buf[self.oldest_entry : self.oldest_entry + batch_size, :] = observation_t\n self._rew_t_buf[self.oldest_entry : self.oldest_entry + batch_size, :] = reward_t\n self._terminal_t_buf[self.oldest_entry : self.oldest_entry + batch_size, :] = terminal_t\n self.size = max(self.size, self.oldest_entry + batch_size)\n self.oldest_entry = (self.oldest_entry + batch_size) % self.capacity\n \n # while writing batch into the buffer, end of buffer is reached\n else:\n tail = self.oldest_entry + batch_size - self.capacity\n self._obs_tm1_buf[self.oldest_entry:, :] = observation_tm1[:batch_size - tail]\n self._act_tm1_buf[self.oldest_entry:, :] = action_tm1[:batch_size - tail]\n self._obs_t_buf[self.oldest_entry:, :] = observation_t[:batch_size - tail]\n self._rew_t_buf[self.oldest_entry:, :] = reward_t[:batch_size - tail]\n self._terminal_t_buf[self.oldest_entry:, :] = terminal_t[:batch_size - tail]\n self._obs_tm1_buf[:tail, :] = observation_tm1[-tail:]\n self._act_tm1_buf[:tail, :] = action_tm1[-tail:]\n self._obs_t_buf[:tail, :] = observation_t[-tail:]\n self._rew_t_buf[:tail, :] = reward_t[-tail:]\n self._terminal_t_buf[:tail, :] = terminal_t[-tail:]\n self.oldest_entry = tail\n self.size = self.capacity\n\n def __getitem__(self, indices):\n return Transition(\n self._obs_tm1_buf[indices], \n self._act_tm1_buf[indices],\n self._rew_t_buf[indices], \n self._obs_t_buf[indices],\n self._terminal_t_buf[indices])\n \n def sample_batch(\n self, batch_size: int\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Sample transitions from the ExperienceBuffer.\n \"\"\"\n indices = np.random.choice(self._sample_range[:self.size], size=batch_size)\n return self[indices]\n \n def serializable(self): \n \"\"\"\n Get pickable representation of Replay Buffer.\n \"\"\" \n lst_serialize = [self._obs_tm1_buf, \n self._act_tm1_buf, \n self._obs_t_buf,\n self._rew_t_buf, \n self._terminal_t_buf,\n self._sample_range, \n self.oldest_entry, \n self.capacity, \n self.size]\n return lst_serialize\n \n def load(self, lst_serializable):\n \"\"\"\n Load pickable representation of Replay Buffer. 
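A reviewer-added, illustrative round-trip (the file name is hypothetical;\n        pickle is already imported at the top of this module):\n            state = buf.serializable()\n            with open(\"buffer.pkl\", \"wb\") as f:\n                pickle.dump(state, f)\n            restored = ExperienceBuffer(observation_len, capacity)\n            with open(\"buffer.pkl\", \"rb\") as f:\n                restored.load(pickle.load(f))\n        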
Inverse function of serializable\n \"\"\"\n self._obs_tm1_buf = lst_serializable[0]\n self._act_tm1_buf = lst_serializable[1]\n self._obs_t_buf = lst_serializable[2]\n self._rew_t_buf = lst_serializable[3]\n self._terminal_t_buf = lst_serializable[4]\n self._sample_range = lst_serializable[5]\n self.oldest_entry = lst_serializable[6]\n self.capacity = lst_serializable[7]\n self.size = lst_serializable[8]","repo_name":"Hanabi-Game-Project/hanabi-agents","sub_path":"hanabi_agents/rlax_dqn/experience_buffer.py","file_name":"experience_buffer.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27051323357","text":"from http import HTTPStatus\nimport requests\n\n# Checks if the order status method returns\n# status 404 for the order which is not created\n# yet.\n\n\n# RESTAURANT SERVICE : http://localhost:8080\n# DELIVERY SERVICE : http://localhost:8081\n# WALLET SERVICE : http://localhost:8082\n\ndef test():\n test_result = 'Pass'\n\n # Reinitialize Restaurant service\n http_response = requests.post(\"http://localhost:8080/reInitialize\")\n\n if(http_response.status_code != HTTPStatus.CREATED):\n test_result = 'Fail'\n\n # Reinitialize Delivery service\n http_response = requests.post(\"http://localhost:8081/reInitialize\")\n\n if(http_response.status_code != HTTPStatus.CREATED):\n test_result = 'Fail'\n\n # Reinitialize Wallet service\n http_response = requests.post(\"http://localhost:8082/reInitialize\")\n\n if(http_response.status_code != HTTPStatus.CREATED):\n test_result = 'Fail'\n\n # Check Order status\n http_response = requests.get(f\"http://localhost:8081/order/1000\")\n\n if(http_response.status_code != HTTPStatus.NOT_FOUND):\n test_result = 'Fail'\n\n return test_result\n\n\nif __name__ == \"__main__\":\n test_result = test()\n print(test_result)\n","repo_name":"kawinm/FoodDeliveryProjectSpring","sub_path":"Tests/public-test-cases/Public2-Project1Phase1.py","file_name":"Public2-Project1Phase1.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"74449666568","text":"from typing import Any, Dict\nfrom django.shortcuts import render\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.urls import reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\nfrom django.views.generic import TemplateView, ListView, CreateView, DetailView, UpdateView, DeleteView\nfrom main import models, forms\n\n\nclass WarehouseListView(ListView):\n template_name = 'main/warehouse/list.html'\n model = models.Warehouse\n\n \n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs)\n\n context[\"header_text\"] = 'Склад'\n context[\"back_url\"] = '/'\n\n return context\n\n\nclass WarehouseCreateView(CreateView):\n template_name = 'main/warehouse/new.html'\n model = models.Warehouse\n form_class = forms.WarehouseCreateForm\n success_url = \"/warehouse/\"\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs)\n\n context[\"header_text\"] = 'Добавление склада'\n context[\"back_url\"] = '/warehouse/'\n\n return context\n\n\nclass WarehouseEditView(UpdateView):\n template_name = 'main/warehouse/edit.html'\n model = models.Warehouse\n form_class = forms.WarehouseCreateForm\n success_url = \"/warehouse/\"\n\n 
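# Reviewer sketch (hypothetical, not part of the original app): each view in this\n    # module repeats the same header_text/back_url wiring inside get_context_data;\n    # a small mixin could hold the shared logic once instead:\n    #   class HeaderContextMixin:\n    #       header_text = ''\n    #       back_url = '/'\n    #       def get_context_data(self, **kwargs):\n    #           context = super().get_context_data(**kwargs)\n    #           context['header_text'] = self.header_text\n    #           context['back_url'] = self.back_url\n    #           return context\n    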
def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs)\n\n context[\"header_text\"] = 'Склад'\n context[\"back_url\"] = '/warehouse/'\n\n return context\n\n\nclass WarehouseDeleteView(DeleteView):\n template_name = 'main/warehouse/delete.html'\n model = models.Warehouse\n form_class = forms.WarehouseCreateForm\n success_url = \"/warehouse/\"\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs)\n\n context[\"header_text\"] = 'Удаление склада'\n context[\"back_url\"] = '/warehouse/edit/%s'%self.kwargs.get('pk')\n\n return context\n","repo_name":"Lifanna/warehouse_mobile_web","sub_path":"warehouse_app/main/views_warehouse.py","file_name":"views_warehouse.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20940057619","text":"from dataclasses import dataclass, field\nfrom typing import List\nfrom bindings.csw.abstract_topology_type import AbstractTopologyType\nfrom bindings.csw.container_property_type import DirectedTopoSolid\n\n__NAMESPACE__ = \"http://www.opengis.net/gml\"\n\n\n@dataclass\nclass TopoVolumeType(AbstractTopologyType):\n \"\"\"The TopoVolume type and element represent a homogeneous topological\n expression, a set of directed TopoSolids, which if realised are isomorphic\n to a geometric solid primitive.\n\n The intended use of TopoVolume is to appear within a 3D solid\n feature instance to express the structural and geometric\n relationships of this solid to other features via the shared\n TopoSolid definitions. . Note the orientation assigned to the\n directedSolid has no meaning in three dimensions. It is preserved\n for symmetry with the preceding types and elements.\n \"\"\"\n\n directed_topo_solid: List[DirectedTopoSolid] = field(\n default_factory=list,\n metadata={\n \"name\": \"directedTopoSolid\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.opengis.net/gml\",\n \"min_occurs\": 1,\n },\n )\n","repo_name":"NIVANorge/s-enda-playground","sub_path":"catalog/bindings/csw/topo_volume_type.py","file_name":"topo_volume_type.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34118329962","text":"import glob\nimport numpy as np\nimport os\nfrom os.path import join\nimport cv2\nimport xml.etree.ElementTree as ET\nimport sys\nfrom xml.dom.minidom import parseString\nfrom lxml.etree import Element, SubElement, tostring\nfrom xml.dom.minidom import Document\nfrom tqdm import tqdm\nif sys.version_info[0] == 2:\n import xml.etree.cElementTree as ET\nelse:\n import xml.etree.ElementTree as ET\n\n# origin_dir = '原图像存放地址'\n# target_dir1 = '分块图像存放地址'\n# annota_dir = '原boundingbox的xml文件存放地址'\n# target_dir2 = '分块boundingbox的xml文件存放地址'\n\n\ndef clip_img(No, oriname, win_size, stride):\n from_name = os.path.join(origin_dir, oriname+'.jpg')\n img = cv2.imread(from_name)\n h_ori, w_ori, _ = img.shape # 保存原图的大小\n # img = cv2.resize(img, (5472, 3648))#可以resize也可以不resize,看情况而定\n h, w, _ = img.shape\n xml_name = os.path.join(annota_dir, oriname+'.xml') # 读取每个原图像的xml文件\n xml_ori = ET.parse(xml_name).getroot()\n res = np.empty((0, 5)) # 存放坐标的四个值和类别\n for obj in xml_ori.iter('object'):\n difficult = int(obj.find('difficult').text) == 1\n if difficult:\n continue\n name = obj.find('name').text.lower().strip()\n bbox = obj.find('bndbox')\n pts = ['xmin', 'ymin', 'xmax', 
'ymax']\n bndbox = []\n for i, pt in enumerate(pts):\n cur_pt = int(bbox.find(pt).text) - 1\n cur_pt = int(\n cur_pt*h/h_ori) if i % 2 == 1 else int(cur_pt * w / w_ori)\n bndbox.append(cur_pt)\n #label_idx = self.class_to_ind[name]\n bndbox.append(name)\n res = np.vstack((res, bndbox))\n i = 0\n # win_size = 1368 # 分块的大小\n # stride = 684 # 重叠的大小,设置这个可以使分块有重叠\n for r in range(0, h - win_size, stride):\n for c in range(0, w - win_size, stride):\n # flag = np.zeros([1,100]) # 这里不应该只有10吧,不然后面第75行 re对象个数基本会超过10个obj\n flag = np.zeros([1, len(res)])\n youwu = False\n xiefou = True\n tmp = img[r: r+win_size, c: c+win_size]\n for re in range(res.shape[0]):\n xmin, ymin, xmax, ymax, label = res[re]\n if int(xmin) >= c and int(xmax) <= c+win_size and int(ymin) >= r and int(ymax) <= r+win_size:\n flag[0][re] = 1\n youwu = True\n elif int(xmin) < c or int(xmax) > c+win_size or int(ymin) < r or int(ymax) > r+win_size:\n pass\n else:\n xiefou = False\n break\n if xiefou: # 如果物体被分割了,则忽略不写入\n if youwu: # 有物体则写入xml文件\n doc = Document()\n annotation = doc.createElement('annotation')\n doc.appendChild(annotation)\n for re in range(res.shape[0]):\n xmin, ymin, xmax, ymax, label = res[re]\n xmin = int(xmin)\n ymin = int(ymin)\n xmax = int(xmax)\n ymax = int(ymax)\n if flag[0][re] == 1:\n xmin = str(xmin-c)\n ymin = str(ymin-r)\n xmax = str(xmax-c)\n ymax = str(ymax-r)\n object_charu = doc.createElement('object')\n annotation.appendChild(object_charu)\n name_charu = doc.createElement('name')\n name_charu_text = doc.createTextNode(label)\n name_charu.appendChild(name_charu_text)\n object_charu.appendChild(name_charu)\n dif = doc.createElement('difficult')\n dif_text = doc.createTextNode('0')\n dif.appendChild(dif_text)\n object_charu.appendChild(dif)\n bndbox = doc.createElement('bndbox')\n object_charu.appendChild(bndbox)\n xmin1 = doc.createElement('xmin')\n xmin_text = doc.createTextNode(xmin)\n xmin1.appendChild(xmin_text)\n bndbox.appendChild(xmin1)\n ymin1 = doc.createElement('ymin')\n ymin_text = doc.createTextNode(ymin)\n ymin1.appendChild(ymin_text)\n bndbox.appendChild(ymin1)\n xmax1 = doc.createElement('xmax')\n xmax_text = doc.createTextNode(xmax)\n xmax1.appendChild(xmax_text)\n bndbox.appendChild(xmax1)\n ymax1 = doc.createElement('ymax')\n ymax_text = doc.createTextNode(ymax)\n ymax1.appendChild(ymax_text)\n bndbox.appendChild(ymax1)\n else:\n continue\n xml_name = oriname+'_'+str(win_size)+'%d.xml' % (i)\n to_xml_name = os.path.join(target_dir2, xml_name)\n with open(to_xml_name, 'wb+') as f:\n f.write(doc.toprettyxml(indent=\"\\t\", encoding='utf-8'))\n #name = '%02d_%02d_%02d_.bmp' % (No, int(r/win_size), int(c/win_size))\n img_name = oriname+'_'+str(win_size)+'%d.jpg' % (i)\n to_name = os.path.join(target_dir1, img_name)\n i = i+1\n cv2.imwrite(to_name, tmp)\n\n\ndef getImagesInDir():\n image_list = []\n for ext in [\"*.JPG\", \"*.jpg\", \"*.png\", \"*.jpeg\"]:\n filenames = glob.glob(os.path.join(target_dir1, ext))\n for filename in filenames:\n image_name = filename.split(\"\\\\\")[-1]\n image_list.append(image_name)\n return image_list\n\n\ndef convert(size, box):\n\n dw = 1./(size[0])\n dh = 1./(size[1])\n x = (box[0] + box[1])/2.0 - 1\n y = (box[2] + box[3])/2.0 - 1\n w = box[1] - box[0]\n h = box[3] - box[2]\n x = x*dw\n w = w*dw\n y = y*dh\n h = h*dh\n\n return (x, y, w, h)\n\n\ndef convert_annotation(image_path):\n global win_size\n basename = os.path.basename(image_path)\n basename_no_ext = os.path.splitext(basename)[0]\n\n in_file = open(target_dir2+'\\\\' + basename_no_ext + 
'.xml')\n out_file = open(target_dir3 + '\\\\' + basename_no_ext + '.txt', 'w')\n tree = ET.parse(in_file)\n root = tree.getroot()\n # size = root.find('size')\n # w = int(size.find('width').text)\n # h = int(size.find('height').text)\n\n for obj in root.iter('object'):\n difficult = obj.find('difficult').text\n cls = obj.find('name').text\n if cls not in classes or int(difficult) == 1:\n continue\n cls_id = classes.index(cls)\n xmlbox = obj.find('bndbox')\n b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(\n xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))\n bb = convert((win_size, win_size), b)\n out_file.write(str(cls_id) + \" \" + str(bb[0]) + \" \" + str(\n bb[1]) + \" \" + str(bb[2]) + \" \" + str(bb[3]) + '\\n')\n\n\ndef VOC2YOLO():\n image_paths = getImagesInDir()\n for image_path in image_paths:\n convert_annotation(image_path)\n print(\"Finished processing\")\n\n\ndef unconvert(class_id, width, height, x, y, w, h):\n xmax = int((x*width) + (w * width)/2.0)\n xmin = int((x*width) - (w * width)/2.0)\n ymax = int((y*height) + (h * height)/2.0)\n ymin = int((y*height) - (h * height)/2.0)\n class_id = int(class_id)\n return (class_id, xmin, xmax, ymin, ymax)\n\n\ndef YOLO2VOC(classes):\n\n classes = list(classes)\n ids = list()\n l = os.listdir(labels_path)\n\n check = '.DS_Store' in l\n if check == True:\n l.remove('.DS_Store')\n\n ids = [x.split('.')[0] for x in l]\n\n annopath = join(labels_path, '%s.txt')\n imgpath = join(img_path, '%s.jpg')\n\n if not os.path.exists(path+'/voc'):\n os.makedirs(path+'/voc')\n\n outpath = join(path+'/voc', '%s.xml')\n\n for i in range(len(ids)):\n img_id = ids[i]\n img = cv2.imread(imgpath % img_id)\n height, width, channels = img.shape\n\n node_root = Element('annotation')\n node_folder = SubElement(node_root, 'folder')\n node_folder.text = 'yh'\n img_name = img_id + '.jpg'\n\n node_filename = SubElement(node_root, 'filename')\n node_filename.text = img_name\n\n node_source = SubElement(node_root, 'source')\n node_database = SubElement(node_source, 'database')\n node_database.text = 'Coco database'\n\n node_size = SubElement(node_root, 'size')\n node_width = SubElement(node_size, 'width')\n node_width.text = str(width)\n\n node_height = SubElement(node_size, 'height')\n node_height.text = str(height)\n\n node_depth = SubElement(node_size, 'depth')\n node_depth.text = str(channels)\n\n node_segmented = SubElement(node_root, 'segmented')\n node_segmented.text = '0'\n\n target = (annopath % img_id)\n if os.path.exists(target):\n label_norm = np.loadtxt(target).reshape(-1, 5)\n\n for i in range(len(label_norm)):\n labels_conv = label_norm[i]\n new_label = unconvert(\n labels_conv[0], width, height, labels_conv[1], labels_conv[2], labels_conv[3], labels_conv[4])\n node_object = SubElement(node_root, 'object')\n node_name = SubElement(node_object, 'name')\n node_name.text = classes[new_label[0]]\n\n node_pose = SubElement(node_object, 'pose')\n node_pose.text = 'Unspecified'\n\n node_truncated = SubElement(node_object, 'truncated')\n node_truncated.text = '0'\n node_difficult = SubElement(node_object, 'difficult')\n node_difficult.text = '0'\n node_bndbox = SubElement(node_object, 'bndbox')\n node_xmin = SubElement(node_bndbox, 'xmin')\n node_xmin.text = str(new_label[1])\n node_ymin = SubElement(node_bndbox, 'ymin')\n node_ymin.text = str(new_label[3])\n node_xmax = SubElement(node_bndbox, 'xmax')\n node_xmax.text = str(new_label[2])\n node_ymax = SubElement(node_bndbox, 'ymax')\n node_ymax.text = 
str(new_label[4])\n xml = tostring(node_root, pretty_print=True)\n dom = parseString(xml)\n f = open(outpath % img_id, \"wb\")\n f.write(xml)\n f.close()\n print(\"Finished processing\")\n\n\nif __name__ == '__main__':\n\n path = os.path.dirname(os.path.realpath(sys.argv[0]))\n\n files = ['images_crop', 'voc_crop', 'yolo_label_crop']\n for file in files:\n if not os.path.exists(path+'\\\\'+file):\n os.makedirs(path+'\\\\'+file)\n\n with open(path+'/classes.txt', \"r\") as f: # 打开文件\n classes = list(f.read().split(',')) # 读取文件\n print(\"类别:\"+str(classes))\n img_path = input('原始影像文件夹地址:')\n labels_path = input('原始标注(labels)文件夹地址:')\n\n YOLO2VOC(classes)\n\n origin_dir = img_path\n target_dir1 = os.path.join(path, 'images_crop')\n annota_dir = os.path.join(path, 'voc')\n target_dir2 = os.path.join(path, 'voc_crop')\n target_dir3 = os.path.join(path, 'yolo_label_crop') # 最终标注yolo_label结果\n\n win_size = int(input('请输入分割像素尺寸(pix):'))\n stride = int(input('请输入重叠像素尺寸(pix):'))\n\n for No, name in tqdm(enumerate(os.listdir(origin_dir))):\n clip_img(No, name[:-4], win_size, stride)\n\n VOC2YOLO()\n","repo_name":"93yh/PD","sub_path":"DP.py","file_name":"DP.py","file_ext":"py","file_size_in_byte":12072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21698825923","text":"from nautilus_trader.adapters.interactive_brokers.common import IB_VENUE\nfrom nautilus_trader.adapters.interactive_brokers.common import IBContract\nfrom nautilus_trader.adapters.interactive_brokers.config import IBMarketDataTypeEnum\nfrom nautilus_trader.adapters.interactive_brokers.config import InteractiveBrokersDataClientConfig\nfrom nautilus_trader.adapters.interactive_brokers.config import InteractiveBrokersExecClientConfig\nfrom nautilus_trader.adapters.interactive_brokers.config import InteractiveBrokersGatewayConfig\nfrom nautilus_trader.adapters.interactive_brokers.config import InteractiveBrokersInstrumentProviderConfig\nfrom nautilus_trader.adapters.interactive_brokers.factories import InteractiveBrokersLiveDataClientFactory\nfrom nautilus_trader.adapters.interactive_brokers.factories import InteractiveBrokersLiveExecClientFactory\nfrom nautilus_trader.config import LiveDataEngineConfig\nfrom nautilus_trader.config import LoggingConfig\nfrom nautilus_trader.config import RoutingConfig\nfrom nautilus_trader.config import TradingNodeConfig\nfrom nautilus_trader.examples.strategies.subscribe import SubscribeStrategy\nfrom nautilus_trader.examples.strategies.subscribe import SubscribeStrategyConfig\nfrom nautilus_trader.live.node import TradingNode\n\n\n# fmt: on\n\n# *** THIS IS A TEST STRATEGY WITH NO ALPHA ADVANTAGE WHATSOEVER. ***\n# *** IT IS NOT INTENDED TO BE USED TO TRADE LIVE WITH REAL MONEY. ***\n\n# *** THIS INTEGRATION IS STILL UNDER CONSTRUCTION. ***\n# *** CONSIDER IT TO BE IN AN UNSTABLE BETA PHASE AND EXERCISE CAUTION. 
***\n\nib_contracts = [\n IBContract(\n secType=\"STK\",\n symbol=\"SPY\",\n exchange=\"SMART\",\n primaryExchange=\"ARCA\",\n build_options_chain=True,\n min_expiry_days=7,\n max_expiry_days=14,\n ),\n IBContract(\n secType=\"CONTFUT\",\n exchange=\"CME\",\n symbol=\"ES\",\n build_futures_chain=True,\n ),\n IBContract(secType=\"FUT\", exchange=\"NYMEX\", localSymbol=\"CLV3\", build_futures_chain=False),\n]\n\ngateway = InteractiveBrokersGatewayConfig(\n start=False,\n username=None,\n password=None,\n trading_mode=\"paper\",\n read_only_api=True,\n)\n\ninstrument_provider = InteractiveBrokersInstrumentProviderConfig(\n build_futures_chain=False,\n build_options_chain=False,\n min_expiry_days=10,\n max_expiry_days=60,\n load_ids=frozenset(\n [\n \"EUR/USD.IDEALPRO\",\n \"BTC/USD.PAXOS\",\n \"SPY.ARCA\",\n \"V.NYSE\",\n \"YMH24.CBOT\",\n \"CLZ27.NYMEX\",\n \"ESZ27.CME\",\n ],\n ),\n load_contracts=frozenset(ib_contracts),\n)\n\n# Configure the trading node\n\nconfig_node = TradingNodeConfig(\n trader_id=\"TESTER-001\",\n logging=LoggingConfig(log_level=\"INFO\"),\n data_clients={\n \"IB\": InteractiveBrokersDataClientConfig(\n ibg_host=\"127.0.0.1\",\n ibg_port=7497,\n ibg_client_id=1,\n handle_revised_bars=False,\n use_regular_trading_hours=True,\n market_data_type=IBMarketDataTypeEnum.DELAYED_FROZEN, # If unset default is REALTIME\n instrument_provider=instrument_provider,\n gateway=gateway,\n ),\n },\n exec_clients={\n \"IB\": InteractiveBrokersExecClientConfig(\n ibg_host=\"127.0.0.1\",\n ibg_port=7497,\n ibg_client_id=1,\n account_id=\"DU123456\", # This must match with the IB Gateway/TWS node is connecting to\n gateway=gateway,\n instrument_provider=instrument_provider,\n routing=RoutingConfig(\n default=True,\n ),\n ),\n },\n data_engine=LiveDataEngineConfig(\n time_bars_timestamp_on_close=False, # Will use opening time as `ts_event` (same like IB)\n validate_data_sequence=True, # Will make sure DataEngine discards any Bars received out of sequence\n ),\n timeout_connection=90.0,\n timeout_reconciliation=5.0,\n timeout_portfolio=5.0,\n timeout_disconnection=5.0,\n timeout_post_stop=2.0,\n)\n\n# Instantiate the node with a configuration\nnode = TradingNode(config=config_node)\n\n# Configure your strategy\nstrategy_config = SubscribeStrategyConfig(\n instrument_id=\"EUR/USD.IDEALPRO\",\n # book_type=None,\n # snapshots=True,\n trade_ticks=False,\n quote_ticks=True,\n # bars=True,\n)\n# Instantiate your strategy\nstrategy = SubscribeStrategy(config=strategy_config)\n\n# Add your strategies and modules\nnode.trader.add_strategy(strategy)\n\n# Register your client factories with the node (can take user defined factories)\nnode.add_data_client_factory(\"IB\", InteractiveBrokersLiveDataClientFactory)\nnode.add_exec_client_factory(\"IB\", InteractiveBrokersLiveExecClientFactory)\nnode.build()\nnode.portfolio.set_specific_venue(IB_VENUE)\n\n# Stop and dispose of the node with SIGINT/CTRL+C\nif __name__ == \"__main__\":\n try:\n node.run()\n finally:\n node.dispose()\n","repo_name":"nautechsystems/nautilus_trader","sub_path":"examples/live/interactive_brokers/interactive_brokers_example.py","file_name":"interactive_brokers_example.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","stars":1199,"dataset":"github-code","pt":"16"} +{"seq_id":"15604273153","text":"import sys\nfrom pathlib import Path\n\nfrom genicam.genapi import NodeMap\nfrom harvesters.core import Harvester\n\nfrom common import CONNECTION_SETTINGS, cti_file_path\n\nFRAME_COUNT = 
3\n\n\ndef main(device_sn: str):\n with Harvester() as h:\n print(\"Load .cti file...\")\n h.add_file(str(cti_file_path), check_existence=True, check_validity=True)\n h.update()\n\n print(f\"Connecting to: {device_sn}\")\n with h.create({\"serial_number\": device_sn}, config=CONNECTION_SETTINGS) as ia:\n features: NodeMap = ia.remote_device.node_map\n\n # \"FrameStart\" is the only supported trigger currently, so the following line could\n # be skipped, but it's safer to set it in case more triggers are added in the future.\n features.TriggerSelector.value = \"FrameStart\"\n features.TriggerMode.value = \"On\"\n features.TriggerSource.value = \"Software\"\n\n ia.start()\n for i in range(FRAME_COUNT):\n features.TriggerSoftware.execute()\n with ia.fetch(timeout=10) as buff:\n print(f\"Frame ID: {i}\")\n components = buff.payload.components\n for component in components:\n print(\n f\"Component: {components.index(component)}\\n\"\n f\"DataFormat: {component.data_format}\\n\"\n f\"Width: {component.width}\\n\"\n f\"Height: {component.height}\\n\"\n )\n\n\nif __name__ == \"__main__\":\n try:\n device_id = sys.argv[1]\n except IndexError:\n print(\n f\"Error: no device given, please run it with the device serial number as argument:\"\n )\n print(f\" {Path(__file__).name} \")\n sys.exit(1)\n main(device_id)\n","repo_name":"photoneo-3d/photoneo-python-examples","sub_path":"GigEV/harvesters/basic/connect_and_grab_example.py","file_name":"connect_and_grab_example.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"25285453935","text":"from typing import Any, NamedTuple\n\nimport opt_einsum\nimport torch\nfrom torch.fx.node import Node\n\nfrom ._fuse import _EINSUM_FUNCS\n\n\nclass SimpleMeta(NamedTuple):\n \"\"\"\n The full ShapeProp defines and uses a NamedTuple to\n store a whole bunch of metadata about the tensors\n going into and out of the Node op. But we don't\n have most of that info, and anyway, I don't think\n most of it's used in opt_einsum or opt_einsum_fx.\n (These are only concerned with computing a summation\n order.)\n\n Rather than give dummy or default values, which I\n only *assume* would be fine, I'm defining a NamedTuple\n with only the values we actually know. 
So if I'm wrong\n we will get a very clear error message, rather than\n some invisible error.\n \"\"\"\n\n shape: torch.Size\n dtype: torch.dtype\n\n\nclass EfficientShapeProp(torch.fx.Interpreter):\n \"\"\"\n Like ShapeProp, traverses a graph Node-by-Node\n and records the shape and type of the result\n into each Node.\n\n Except we treat 'einsum' as a special case.\n We don't actually execute 'einsum' on tensors,\n since the einsums will typically not be optimized\n yet (ShapeProp is called before optimization),\n and inefficient summation order can create\n enormous intermediate tensors, which often creates\n needless out-of-memory errors.\n\n So we override 'run_node' only for 'einsums'.\n It's straightforward to determine the shape of the\n result just from the output indices.\n\n (The call to opt_einsum that will typically follow\n this, also doesn't actually build the tensors\n during its exploration.)\n \"\"\"\n\n def run_node(self, n: Node) -> Any:\n if n.op == \"call_function\" and n.target in _EINSUM_FUNCS:\n args, kwargs = self.fetch_args_kwargs_from_env(n)\n equation, *operands = args\n shapes = [op.shape for op in operands]\n\n assert len({op.dtype for op in operands}) == 1\n meta = SimpleMeta(einsum_shape(equation, *shapes), operands[0].dtype)\n result = torch.zeros((1,) * len(meta.shape), dtype=meta.dtype, device=operands[0].device).expand(meta.shape)\n elif n.op == \"call_function\" and n.target == torch.tensordot:\n args, kwargs = self.fetch_args_kwargs_from_env(n)\n shape_a = [dim for i, dim in enumerate(args[0].shape) if i not in kwargs['dims'][0]]\n shape_b = [dim for i, dim in enumerate(args[1].shape) if i not in kwargs['dims'][1]]\n\n assert len({op.dtype for op in args}) == 1\n meta = SimpleMeta(shape_a + shape_b, args[0].dtype)\n result = torch.zeros((1,) * len(meta.shape), dtype=meta.dtype, device=args[0].device).expand(meta.shape)\n else:\n result = super().run_node(n)\n\n if isinstance(result, torch.Tensor):\n meta = SimpleMeta(result.shape, result.dtype)\n else:\n meta = None\n\n n.meta = dict()\n n.meta['tensor_meta'] = meta\n n.meta['type'] = type(result)\n\n return result\n\n def propagate(self, *args):\n return super().run(*args)\n\n\ndef einsum_shape(subscripts, *shapes):\n \"\"\"\n Given an einsum equation and input shapes, returns the output\n shape of the einsum.\n\n Args:\n subscripts: the einsum formula\n shapes: the input shapes\n \"\"\"\n Shaped = NamedTuple('Shaped', [('shape', tuple)])\n input_subscripts, output_subscript, _ = opt_einsum.parser.parse_einsum_input(\n (subscripts,) + tuple(Shaped(shape) for shape in shapes)\n )\n dims = {\n i: dim\n for ii, shape in zip(input_subscripts.split(','), shapes)\n for i, dim in zip(ii, shape)\n }\n return tuple(dims[i] for i in output_subscript)\n","repo_name":"Linux-cpp-lisp/opt_einsum_fx","sub_path":"opt_einsum_fx/_efficient_shape_prop.py","file_name":"_efficient_shape_prop.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"16"} +{"seq_id":"16857928726","text":"from Integrators import euler_osc_solver, verlet_osc_solver, verlet_ballistic_solver\nimport matplotlib.pyplot as plt\nfrom numpy import pi\nimport numpy as np\n\nif __name__ == \"__main__\":\n\n # Running the harmonic oscillator - commented\n '''\n # Parameters for the integrator\n time_params = [ 0.0 , 10*pi , 1000 ]\n pos_i = 1.0\n vel_i = 0.0\n spring_params = [ 1.0 , 1.0 , 0.0 ]\n\n time , pos , vel = euler_osc_solver( time_params , pos_i , vel_i , 
spring_params )\n time , pos_v , vel_v = verlet_osc_solver( time_params , pos_i , vel_i , spring_params )\n\n # Real solution to compare with\n pos_r = np.cos( time )\n\n # 2D Plot example of integrated vs. real solution\n fig, ax = plt.subplots()\n ax.plot( time , pos , color = \"green\" , linestyle = \"solid\" , label = r\"result Euler\" )\n ax.plot( time , pos_r , color = \"red\" , linestyle = \"solid\" , label = r\"real position\" )\n ax.plot( time , pos_v , color = \"blue\" , linestyle = \"solid\" , label = r\"result Verlet\" )\n\n ax.set_xlabel( r\"Time [sec]\" )\n ax.set_ylabel( r\"Position in [m]\" )\n\n ax.legend( loc = \"upper right\" )\n\n #ax.set_ylim( 0.0 , pi/2.0 )\n plt.grid( True )\n\n #fig.savefig( \"fig_name.pdf\" , format = \"pdf\" )\n \n plt.show()\n '''\n # Running the ballistic propagator\n # Parameters for the integrator\n time_params = [ 0.0 , 1.8 , 1000 ]\n vel_tot = 10.0 # [m/s]\n alpha = 60.0 # [deg]\n ball_params = 0.0\n\n time , pos , vel = verlet_ballistic_solver( time_params , vel_tot , alpha , ball_params )\n\n # Transpose positions to get time arrays separately\n x_plot = np.transpose( pos )[ 0 ]\n y_plot = np.transpose( pos )[ 1 ]\n\n # 2D Plot example of integrated vs. real solution\n fig, ax = plt.subplots()\n ax.plot( x_plot , y_plot , color = \"green\" , linestyle = \"solid\" , label = r\"Trajectory\" )\n\n ax.set_xlabel( r\"X position [m]\" )\n ax.set_ylabel( r\"Y position [m]\" )\n\n ax.legend( loc = \"upper right\" )\n plt.grid( True )\n\n #fig.savefig( \"fig_name.pdf\" , format = \"pdf\" )\n \n plt.show()","repo_name":"vidanchev/sc2022-num-methods","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"4588602102","text":"#!/usr/bin/python3\n\"\"\"\na script that takes in an argument and displays all values in the\nstates table of hbtn_0e_0_usa where name matches the argument.\nBut this time, write one that is safe from MySQL injections\n\"\"\"\nimport MySQLdb\nfrom sys import argv\n\n\ndef my_safe_filter_database():\n \"\"\"\n a function to filter the database\n \"\"\"\n\n db = MySQLdb.connect(\n host='localhost',\n port=3306,\n user=argv[1],\n passwd=argv[2],\n db=argv[3]\n )\n\n cursor = db.cursor()\n cursor.execute(\"SELECT * FROM states WHERE name=%s \\\n ORDER BY id ASC\", (argv[4],))\n rows = cursor.fetchall()\n\n for row in rows:\n print(row)\n\n cursor.close()\n db.close()\n\n\nif __name__ == '__main__':\n my_safe_filter_database()\n","repo_name":"MennatAllahhxx/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/3-my_safe_filter_states.py","file_name":"3-my_safe_filter_states.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30227150716","text":"##KNN\n##Decision Tree\n##Random Forest\n##Linear SVM \n##Non-linear SVM \nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\n\nimport os\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix,accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA#Principal Component Analysis\n\nclass Prepar_dataset:\n def 
__init__(\n        self,\n        path=None,\n    ):\n        self.path = path\n        self.features = []\n        self.labels = []\n\n    def giiist(self):\n        import gist#https://github.com/tuttieee/lear-gist-python\n        cnt = -1\n\n        for c in os.listdir(self.path):\n            cnt += 1\n            for file_name in os.listdir(\n                os.path.join(\n                    self.path,\n                    c,\n                )\n            ):\n                file_path = os.path.join(self.path,c,file_name)\n                img = cv2.imread(file_path)\n                self.labels.append(cnt)\n                self.features.append(gist.extract(img))\n        \n##        X_pan1 = pd.DataFrame(self.features)\n##        X_pan1[\"labels\"] = self.labels\n##        X_pan1.to_csv(\"UCMerced_LandUse_PCA.csv\")\n\n    def PrincipalComponentAnalysis(self):\n        \"\"\"Reduce the dimensionality of the data\"\"\"\n        scaled_data = StandardScaler(\n            ).fit_transform(self.features)# standardize the data \n        pca = PCA()# reduce the dimensionality of the data \n        pca.fit(scaled_data)\n        print(pca.explained_variance_ratio_)\n\n        pca1 = PCA(n_components=43)\n##        data1 = pca1.fit_transform(scaled_data)\n        self.features = pca1.fit_transform(scaled_data)\n\n##        X_pan = pd.DataFrame(data1)\n##        X_pan[\"labels\"] = self.labels\n##        X_pan.to_csv(\"UCMerced_LandUse.csv\")\n##        \"\"\"\n##        UCMerced_LandUse.csv: dataset without PCA\n##        UCMerced_LandUse_PCA.csv: dataset with PCA\n##        \"\"\"\n##        data = pd.read_csv('UCMerced_LandUse.csv')\n##        features, labels = data.iloc[:,0:44], data.iloc[:,44]\n\nclass Clsification:\n    \"\"\"Most of the classifier classes in sklearn provide the following methods:\n\n    fit(X, y): Fit the model using X as training data and y as target values\n    predict(X): Predict the class labels for the provided data\n    predict_proba(X): Return probability estimates for the test data X (the SVM.SVC class does not have this method)\n    score(X, y): Returns the mean accuracy on the given test data and labels\n    \"\"\"\n    def __init__(\n        self,\n        features=None,\n        labels=None,\n        bestParams=False,\n    ):\n        self.bestParams = bestParams  # remember whether to grid-search for the best hyperparameters\n        if features is None and labels is None:\n            print(self.__doc__)\n        else:\n            self.features=features\n            self.labels=labels\n            from sklearn.model_selection import train_test_split\n            self.x_train, self.x_test, self.y_train, self.y_test\\\n                           = train_test_split(\n                               self.features,\n                               self.labels,\n                               test_size=0.2,\n                           )\n            print('Number of training data: ',\n                  self.x_train.shape[0])\n            print('Number of testing data: ',\n                  self.x_test.shape[0])\n    def KNearestNeightbors(self):\n        knn = KNeighborsClassifier(\n            n_neighbors=1,\n            algorithm='kd_tree'\n        )\n        param = {'n_neighbors': [1, 2]}\n        return self.fiiit(knn, param)\n    def DecisionTree(self):\n        knn = DecisionTreeClassifier()\n        param = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'min_samples_leaf': [1, 5, 10]}\n        return self.fiiit(knn, param)\n    def SupportVectorMachines(self):\n        knn = SVC(C = 20)\n        param = {'C': [0.5, 1, 5, 20, 100, 500], 'kernel': ['poly', 'rbf', 'sigmoid']}\n        return self.fiiit(knn, param)\n    def fiiit(self, knn, param):\n        print(knn) \n        if self.bestParams:\n            from sklearn.model_selection import GridSearchCV\n            knnn = GridSearchCV(\n                estimator=knn,\n                param_grid=param,\n                cv=3, n_jobs=4\n            )\n        else:\n            knnn = knn\n        knnn.fit(self.x_train, self.y_train)\n        if self.bestParams:\n            # best_params_ only exists after the GridSearchCV has been fitted\n            print('knnn.best_params_: ', knnn.best_params_)\n        print(knnn.score(self.x_train, self.y_train))\n        print(knnn.score(self.x_test, self.y_test))\n##s=Clsification()\ns=Prepar_dataset()\nprint(s.PrincipalComponentAnalysis.__doc__)\n","repo_name":"nguyenlamvu123/create_trailer","sub_path":"classi_fication.py","file_name":"classi_fication.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} ␊
+{"seq_id":"72279809928","text":"from typing import List, Union\n\nfrom fastapi import HTTPException\nimport yaml\nimport os\nfrom pathlib import Path\n\nfrom ..models import Bucket\nfrom .storage import Storage\nfrom .indices import Index\nfrom .buffer import CircularQueue, MaxSizeQueue\n\nfrom ..utils import PartialFormatter\n\n\nclass Buckets:\n\n def __init__(self, index: Index, storage: Storage):\n self._buckets = {b['name']: Bucket(**b) for b in self.load()}\n self._storage = storage\n self._index = index\n\n @staticmethod\n def load() -> List[dict]:\n \"\"\" loads the buckets configuration from file \"\"\"\n\n with open(Path(os.environ.get('BUCKETS_CONFIG', 'config/buckets.yml')), 'r') as fp:\n content = fp.read()\n\n variables = {'ENVIRONMENT': os.environ}\n\n s = PartialFormatter().format(content, **variables)\n\n buckets = yaml.safe_load(s)['buckets']\n\n validate_buckets(buckets)\n\n return buckets\n\n def get_queue(self, bucket: Union[Bucket, str]) -> CircularQueue:\n if isinstance(bucket, str):\n config = self.get(bucket).storage\n else:\n config = bucket.storage\n\n if config.max_size is not None:\n return MaxSizeQueue(config.max_size.absolute,\n config.usual_object_size, config.margin_size,\n self._index, self._storage)\n raise NotImplementedError(\"Not yet implemented!\")\n\n def get(self, bucket: str) -> Bucket:\n return self._buckets[bucket]\n\n def get_all(self) -> List[Bucket]:\n return list(self._buckets.values())\n\n def validate_bucket(self, bucket: str):\n if bucket not in self._buckets:\n raise HTTPException(400, {\n \"loc\": [\n \"query\",\n \"bucket\"\n ],\n \"msg\": \"value is not a valid bucket name\",\n \"type\": \"type_error.str\"\n })\n\n\ndef validate_buckets(buckets: List[dict]):\n \"\"\" validates the buckets configuration \"\"\"\n\n assert all('name' in b for b in buckets)\n assert len(set(b['name'] for b in buckets)) == len(buckets)\n","repo_name":"toschoch/python-storageapi","sub_path":"api/services/buckets.py","file_name":"buckets.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41935580153","text":"from spline import Spline\nfrom polynom import NewtonPolynom\nfrom utils import *\n\nfrom sys import argv\n\n\ndef main() -> None:\n f = argv[1]\n points = read_points(f)\n print(\"Table of points\\n\")\n print_points(points)\n\n print('\\nEntry X for interpolate:\\n')\n x = float(input())\n\n spline_res = Spline(points).solve(x)\n newton_res = NewtonPolynom(points).solve(x)\n\n print_res(spline_res, newton_res)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Flash1ee/ca-labs-4th-sem-bmstu","sub_path":"lab_03/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24104027456","text":"\r\n\r\ndef calculations(stockSymbol, stockAllotment, stockFinalSharePrice, stockSellCommision, stockInitialSharePrice, stockBuyCommision, stockCapitalGainTaxRate):\r\n proceeds = int(stockAllotment) * float(stockFinalSharePrice)\r\n totalTax = (((float(stockFinalSharePrice) - float(stockInitialSharePrice)) * int(stockAllotment) - float(stockBuyCommision) - float(stockSellCommision)))\r\n tax = totalTax * float(stockCapitalGainTaxRate) / 100\r\n initialTotal = int(stockAllotment) * float(stockInitialSharePrice)\r\n cost = initialTotal + float(stockBuyCommision) + float(stockSellCommision) + tax\r\n netProfit = proceeds - cost\r\n 
returnOnInvestment = netProfit / cost * 100\r\n    breakEven = (initialTotal + float(stockBuyCommision) + float(stockSellCommision)) / int(stockAllotment)\r\n\r\n    print(\"Proceeds = $\", proceeds)\r\n    print(\"Cost = $\", cost)\r\n    print(\"NetProfit = $\", netProfit)\r\n    print(\"Return on investment = \", returnOnInvestment, \"%\")\r\n    print(\"Break Even Price = $\", breakEven)\r\n    \r\n\r\ndef main():\r\n    print(\"Enter stock symbol :\")\r\n    stockSymbol = input()\r\n    print(\"Enter stock allotment :\")\r\n    stockAllotment = input()\r\n    print(\"Enter stock final share price :\")\r\n    stockFinalSharePrice = input()\r\n    print(\"Enter stock sell commission :\")\r\n    stockSellCommision = input()\r\n    print(\"Enter stock initial share price :\")\r\n    stockInitialSharePrice = input()\r\n    print(\"Enter stock buy commission:\")\r\n    stockBuyCommision = input()\r\n    print(\"Enter stock capital gain tax rate :\")\r\n    stockCapitalGainTaxRate = input()\r\n\r\n    calculations(stockSymbol, stockAllotment, stockFinalSharePrice, stockSellCommision, stockInitialSharePrice, stockBuyCommision, stockCapitalGainTaxRate)\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n    ","repo_name":"indureddem/cmpe285","sub_path":"Python-Stock-Profit-cal/stockprofitcalc.py","file_name":"stockprofitcalc.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26330844422","text":"# -*- encoding: utf-8 -*-\n#!/usr/bin/env python\n\n###\n# font move\n###\n\n# import the pygame library\nimport pygame\n# import some commonly used functions and constants\nfrom pygame.locals import *\n# borrow the exit function from the sys module to quit the program\nfrom sys import exit\n# initialize pygame and get the hardware ready\npygame.init()\n\n# create a window\nscreen = pygame.display.set_mode((640, 480), 0, 32)\n# set the window title\npygame.display.set_caption(\"Hello, World!\")\n# load and convert the image\n# convert turns the image data into a Surface object; do it after every image load (in fact it is so common that pygame will do it for you if you don't);\n# convert_alpha, unlike convert, keeps the alpha channel (roughly, the transparent parts), which is what lets a cursor have an irregular shape.\nbackground_image_filename = '../images/sushiplate.jpg'\nbackground = pygame.image.load(background_image_filename).convert()\nfont = pygame.font.SysFont(\"宋体\", 40)\n# font = pygame.font.Font(\"simsun.ttc\", 40)\ntext_surface = font.render(u\"你好\", True, (0, 0, 255))\nx = (640 - text_surface.get_width())/2\ny = (480 - text_surface.get_height())/2\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            exit()\n\n    # draw the background\n    # blit is an important function: the first argument is a Surface object, the second is the top-left position.\n    screen.blit(background, (0,0))\n\n    # if the text scrolls too fast, tweak this number\n    x -= 2 \n    if x < -text_surface.get_width():\n        x = 640 - text_surface.get_width()\n\n    screen.blit(text_surface, (x, y))\n    # refresh the screen (always remember to call update after drawing, otherwise it stays pitch black.)\n    pygame.display.update()\n    ","repo_name":"yanlinpu/information","sub_path":"python/pygames/lession4/font_move.py","file_name":"font_move.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9146960521","text":"\nimport logging\nimport RPi.GPIO as GPIO\nfrom .interface_relay import InterfaceRelay\n\nclass InterfaceHeater(InterfaceRelay):\n\n    def __init__(self, power_pin, pwm_pin=None):\n\n        self._logger = logging.getLogger(__name__)\n\n        self.power_pin = power_pin\n        self.pwm_pin = pwm_pin\n        \n        super().__init__(self.power_pin)\n\n        # setup GPIO pins\n        GPIO.setmode(GPIO.BOARD)\n        if self.pwm_pin is not None:\n            self._setup_pwm()\n\n    def __del__(self):\n        \"\"\" Call GPIO.cleanup on all used pins\n        \"\"\"\n        if self.pwm_pin is not None:\n            GPIO.cleanup(self.pwm_pin)\n\n    ␊
def _setup_pwm(self):\n GPIO.setup(self.pwm_pin, GPIO.OUT)\n","repo_name":"davidsean/OpenMycelium","sub_path":"openmycelium/interface_heater.py","file_name":"interface_heater.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"34081119184","text":"nos = '0123456789'\r\n\r\n#def isnoice(x):\r\n# start_ind = str(x)[0]\r\n# for ch in str(x):\r\n \r\ndef chk():\r\n for i in range (len(nos)):\r\n s1 = nos[i:] + nos[0:i]\r\n s2 = s1[::-1]\r\n print(s1, s2)\r\n \r\nchk()\r\nnos = '0'+nos\r\nprint(nos)\r\nchk()","repo_name":"jaison-joseph/projecteuler","sub_path":"1-50/p24_dumb.py","file_name":"p24_dumb.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5427134283","text":"#!/usr/bin/python3\nimport json\nfrom country.country import Country\nfrom datetime import datetime, timezone\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\n\n\ndef main():\n cred = credentials.Certificate(\"covid-tracker-f7e16-firebase-adminsdk-sf3np-7c1b236bc3.json\")\n firebase_admin.initialize_app(cred, {\n 'projectId': \"covid-tracker-f7e16\",\n })\n\n db = firestore.client()\n file = open(\"countryData.json\", \"r\")\n data = json.load(file)\n\n print(db)\n\n for country in data['Countries']:\n #countryData = Country()\n time = country[\"Date\"].split(\"T\")\n date = time[0].split(\"-\")\n hour = time[1].split(\":\")\n seconds = hour[2].split(\".\")\n\n dateTime = datetime(int(date[0]), int(date[1]), int(date[2]), int(hour[0]), int(hour[1]), int(seconds[0]))\n timeStamp = datetime.timestamp(dateTime)\n\n data = {\n u'countryCode': country[\"CountryCode\"],\n u'countryName': country[\"Country\"],\n u'newConfirmed': country[\"NewConfirmed\"],\n u'totalConfirmed': country[\"TotalConfirmed\"],\n u'newDeaths': country[\"NewDeaths\"],\n u'totalDeaths': country[\"TotalDeaths\"],\n u'newRecovered': country[\"NewRecovered\"],\n u'totalRecovered': country[\"TotalRecovered\"],\n u'date': timeStamp\n }\n db.collection(u'stats').document(country[\"Country\"]).set(data)\n\n file.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Matttx/Flutter-II-Movies","sub_path":"data/fillDatabase.py","file_name":"fillDatabase.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5509300426","text":"#!/usr/bin/python\n\nimport sqlite3\nimport uuid\n\ndef createdb(conn):\n c = conn.cursor()\n c.execute(\"CREATE TABLE employee (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL)\")\n c.execute(\"CREATE INDEX employee_name on employee(name)\")\n\ndef insertData(conn, rows):\n c = conn.cursor()\n for i in range(rows):\n c.execute(\"INSERT INTO employee (name) VALUES (?)\", (str(uuid.uuid4()),))\n conn.commit()\n\ndef main():\n conn = sqlite3.connect('test.db')\n createdb(conn)\n insertData(conn, 25000)\n \n \n \nmain()\n","repo_name":"drichardson/examples","sub_path":"sqlite/build_db.py","file_name":"build_db.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"16"} +{"seq_id":"12861594968","text":"# a)\ndef read_from_file(filename):\n f = open(filename, 'r')\n contents = f.read()\n f.close()\n return contents\n\n\n# b)\ndef remove_symbols(text):\n characters = [',', '.', '!', '?', 
'%', '\"', \"'\", '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', '/', ';', '(', ')', '\\n', '\\t', '<', '>']\n new_word = ''\n for i in range(len(text)):\n okey = True\n for character in characters:\n if text[i] == character:\n okey = False\n break\n if okey:\n new_word += text[i].lower()\n\n words = new_word.split(' ')\n\n return words\n\n#''.join(letter for letter in text if letter.isalpha() or letter == ' '\n\n# c)\ndef count_words(filename):\n freq = {}\n\n for ord in filename:\n if ord not in freq:\n freq[ord] = 1\n else:\n freq[ord] += 1\n\n return freq\n\nbible_dict = count_words(remove_symbols(read_from_file('BIBLE.txt')))\nfor word, value in bible_dict.items():\n print(word, value)","repo_name":"ankile/ITGK-TDT4110","sub_path":"Øving 9/øving_9_7.py","file_name":"øving_9_7.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"31554628147","text":"import pandas as pd\nimport numpy as np\nimport sklearn\nfrom transformers import BertTokenizer\n\n\ndef transfer_to_tokens(text):\n maxlen=64\n tokenizer = BertTokenizer.from_pretrained(\"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt\")\n input_ids=tokenizer.encode(text)\n print(text)\n input_ids=tokenizer.encode(text,max_length=maxlen)\n print(input_ids)\n print(tokenizer.convert_ids_to_tokens(input_ids))\n\n\ndef main():\n train = pd.read_csv('/Users/zhiyan1992/documents/github/tutorial/twitter_forecast_bert/data/train_processed.csv',index_col='id')\n test = pd.read_csv('/Users/zhiyan1992/documents/github/tutorial/twitter_forecast_bert/data/test_processed.csv',index_col='id')\n train=train[['text','target']]\n print(train.head())\n case1=train['text'].values[0]\n\n transfer_to_tokens(case1)\n\n #sklearn.model_selection.train_test_split\n\nif __name__==\"__main__\":\n main()\n\n\n","repo_name":"yizhiyan1992/Twitter_forecast_BERT","sub_path":"program/Twitter_Bert_tokenization.py","file_name":"Twitter_Bert_tokenization.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"70783632648","text":"import math\nimport torch\nfrom torch.optim.optimizer import Optimizer\nimport itertools as it\n\n\nclass Ranger(Optimizer):\n\tdef __init__(self, params, lr=1e-3, alpha=0.5, k=6, n_sma_threshhold=5, betas=(.95, 0.999),\n\t\t\t\t eps=1e-5, weight_decay=0, amsgrad=True, transformer='softplus', smooth=50,\n\t\t\t\t grad_transformer='square'):\n\t\t# parameter checks\n\t\tif not 0.0 <= alpha <= 1.0:\n\t\t\traise ValueError(f'Invalid slow update rate: {alpha}')\n\t\tif not 1 <= k:\n\t\t\traise ValueError(f'Invalid lookahead steps: {k}')\n\t\tif not lr > 0:\n\t\t\traise ValueError(f'Invalid Learning Rate: {lr}')\n\t\tif not eps > 0:\n\t\t\traise ValueError(f'Invalid eps: {eps}')\n\n\t\t# parameter comments:\n\t\t# beta1 (momentum) of .95 seems to work better than .90...\n\t\t# N_sma_threshold of 5 seems better in testing than 4.\n\t\t# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.\n\n\t\t# prep defaults and init torch.optim base\n\t\tdefaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas,\n\t\t\t\t\t\tn_sma_threshhold=n_sma_threshhold, eps=eps, weight_decay=weight_decay,\n\t\t\t\t\t\tsmooth=smooth, transformer=transformer, grad_transformer=grad_transformer,\n\t\t\t\t\t\tamsgrad=amsgrad)\n\t\tsuper().__init__(params, defaults)\n\n\t\t# 
adjustable threshold\n\t\tself.n_sma_threshhold = n_sma_threshhold\n\n\t\t# look ahead params\n\t\tself.alpha = alpha\n\t\tself.k = k\n\n\t\t# radam buffer for state\n\t\tself.radam_buffer = [[None, None, None] for ind in range(10)]\n\n\t\t# self.first_run_check=0\n\n\t\t# lookahead weights\n\t\t# 9/2/19 - lookahead param tensors have been moved to state storage.\n\t\t# This should resolve issues with load/save where weights were left in GPU memory from first load, slowing down future runs.\n\n\t\t# self.slow_weights = [[p.clone().detach() for p in group['params']]\n\t\t# for group in self.param_groups]\n\n\t\t# don't use grad for lookahead weights\n\t\t# for w in it.chain(*self.slow_weights):\n\t\t# w.requires_grad = False\n\n\tdef __setstate__(self, state):\n\t\tprint(\"set state called\")\n\t\tsuper(Ranger, self).__setstate__(state)\n\n\tdef step(self, closure=None):\n\t\tloss = None\n\t\t# note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.\n\t\t# Uncomment if you need to use the actual closure...\n\n\t\t# if closure is not None:\n\t\t# loss = closure()\n\n\t\t# Evaluate averages and grad, update param tensors\n\t\tfor group in self.param_groups:\n\n\t\t\tfor p in group['params']:\n\t\t\t\tif p.grad is None:\n\t\t\t\t\tcontinue\n\t\t\t\tgrad = p.grad.data.float()\n\t\t\t\tif grad.is_sparse:\n\t\t\t\t\traise RuntimeError('Ranger optimizer does not support sparse gradients')\n\n\t\t\t\tamsgrad = group['amsgrad']\n\t\t\t\tsmooth = group['smooth']\n\t\t\t\tgrad_transformer = group['grad_transformer']\n\n\t\t\t\tp_data_fp32 = p.data.float()\n\n\t\t\t\tstate = self.state[p] # get state dict for this param\n\n\t\t\t\tif len(state) == 0: # if first time to run...init dictionary with our desired entries\n\t\t\t\t\t# if self.first_run_check==0:\n\t\t\t\t\t# self.first_run_check=1\n\t\t\t\t\t# print(\"Initializing slow buffer...should not see this at load from saved model!\")\n\t\t\t\t\tstate['step'] = 0\n\t\t\t\t\tstate['exp_avg'] = torch.zeros_like(p_data_fp32)\n\t\t\t\t\tstate['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n\t\t\t\t\tif amsgrad:\n\t\t\t\t\t\t# Maintains max of all exp. moving avg. of sq. grad. values\n\t\t\t\t\t\tstate['max_exp_avg_sq'] = torch.zeros_like(p.data)\n\n\t\t\t\t\t\t# look ahead weight storage now in state dict\n\t\t\t\t\tstate['slow_buffer'] = torch.empty_like(p.data)\n\t\t\t\t\tstate['slow_buffer'].copy_(p.data)\n\n\t\t\t\telse:\n\t\t\t\t\tstate['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n\t\t\t\t\tstate['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n\t\t\t\t# begin computations\n\t\t\t\texp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n\t\t\t\tbeta1, beta2 = group['betas']\n\t\t\t\tif amsgrad:\n\t\t\t\t\tmax_exp_avg_sq = state['max_exp_avg_sq']\n\n\t\t\t\t\t# compute variance mov avg\n\t\t\t\texp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\t\t\t\t# compute mean moving avg\n\t\t\t\texp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n\t\t\t\t##transformer\n\t\t\t\tif grad_transformer == 'square':\n\t\t\t\t\tgrad_tmp = grad ** 2\n\t\t\t\telif grad_transformer == 'abs':\n\t\t\t\t\tgrad_tmp = grad.abs()\n\n\t\t\t\texp_avg_sq.mul_(beta2).add_((1 - beta2) * grad_tmp)\n\n\t\t\t\tif amsgrad:\n\t\t\t\t\t# Maintains the maximum of all 2nd moment running avg. till now\n\t\t\t\t\ttorch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n\t\t\t\t\t# Use the max. for normalizing running avg. 
of gradient\n\t\t\t\t\tdenomc = max_exp_avg_sq.clone()\n\t\t\t\telse:\n\t\t\t\t\tdenomc = exp_avg_sq.clone()\n\n\t\t\t\tif grad_transformer == 'square':\n\t\t\t\t\t# pdb.set_trace()\n\t\t\t\t\tdenomc.sqrt_()\n\n\t\t\t\tstate['step'] += 1\n\n\t\t\t\tif group['weight_decay'] != 0:\n\t\t\t\t\tp_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n\t\t\t\tbias_correction1 = 1 - beta1 ** state['step']\n\t\t\t\tbias_correction2 = 1 - beta2 ** state['step']\n\t\t\t\tstep_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n\t\t\t\t# ...let's use calibrated alr\n\t\t\t\tif group['transformer'] == 'softplus':\n\t\t\t\t\tsp = torch.nn.Softplus(smooth)\n\t\t\t\t\tdenomf = sp(denomc)\n\t\t\t\t\tp_data_fp32.addcdiv_(-step_size, exp_avg, denomf)\n\n\t\t\t\telse:\n\n\t\t\t\t\tdenom = exp_avg_sq.sqrt().add_(group['eps'])\n\t\t\t\t\tp_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)\n\n\t\t\t\tp.data.copy_(p_data_fp32)\n\n\t\t\t\t# integrated look ahead...\n\t\t\t\t# we do it at the param level instead of group level\n\t\t\t\tif state['step'] % group['k'] == 0:\n\t\t\t\t\tslow_p = state['slow_buffer'] # get access to slow param tensor\n\t\t\t\t\tslow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha\n\t\t\t\t\tp.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor\n\n\t\treturn loss\n\n\nclass SAM(torch.optim.Optimizer):\n def __init__(self, params, base_optimizer, rho=0.05, adaptive=False, **kwargs):\n assert rho >= 0.0, f\"Invalid rho, should be non-negative: {rho}\"\n\n defaults = dict(rho=rho, adaptive=adaptive, **kwargs)\n super(SAM, self).__init__(params, defaults)\n\n self.base_optimizer = base_optimizer(self.param_groups, **kwargs)\n self.param_groups = self.base_optimizer.param_groups\n\n @torch.no_grad()\n def first_step(self, zero_grad=False):\n grad_norm = self._grad_norm()\n for group in self.param_groups:\n scale = group[\"rho\"] / (grad_norm + 1e-12)\n\n for p in group[\"params\"]:\n if p.grad is None: continue\n self.state[p][\"old_p\"] = p.data.clone()\n e_w = (torch.pow(p, 2) if group[\"adaptive\"] else 1.0) * p.grad * scale.to(p)\n p.add_(e_w) # climb to the local maximum \"w + e(w)\"\n\n if zero_grad: self.zero_grad()\n\n @torch.no_grad()\n def second_step(self, zero_grad=False):\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None: continue\n p.data = self.state[p][\"old_p\"] # get back to \"w\" from \"w + e(w)\"\n\n self.base_optimizer.step() # do the actual \"sharpness-aware\" update\n\n if zero_grad: self.zero_grad()\n\n @torch.no_grad()\n def step(self, closure=None):\n assert closure is not None, \"Sharpness Aware Minimization requires closure, but it was not provided\"\n closure = torch.enable_grad()(closure) # the closure should do a full forward-backward pass\n\n self.first_step(zero_grad=True)\n closure()\n self.second_step()\n\n def _grad_norm(self):\n shared_device = self.param_groups[0][\"params\"][0].device # put everything on the same device, in case of model parallelism\n norm = torch.norm(\n torch.stack([\n ((torch.abs(p) if group[\"adaptive\"] else 1.0) * p.grad).norm(p=2).to(shared_device)\n for group in self.param_groups for p in group[\"params\"]\n if p.grad is not None\n ]),\n p=2\n )\n return norm\n\n def load_state_dict(self, state_dict):\n super().load_state_dict(state_dict)\n self.base_optimizer.param_groups = 
self.param_groups\n","repo_name":"kingqiuol/pytorch-template","sub_path":"utils/optim.py","file_name":"optim.py","file_ext":"py","file_size_in_byte":7994,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"25461651627","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom list_definition import BRAND_LIST\nimport urllib\n\n\nclass LinkFinder():\n\n def __init__(self):\n super().__init__()\n self.total_links = 0\n self.links = {}\n self.list_brand = BRAND_LIST\n\n def page_links(self):\n return self.links\n\n def error(self, message):\n pass\n\n def getProductUrlTiki(self):\n\n # =============================================================================\n # Using brandUrl set() to get numpage of each URL\n # =============================================================================\n brand_counts = 0\n brand_urls = list()\n url_and_numpage = dict()\n\n for brand in self.list_brand:\n url = 'https://tiki.vn/dien-thoai-may-tinh-bang/c1789/' + brand\n brand_urls.append(url)\n\n for url in brand_urls:\n max_num_page = 1\n try:\n soup = BeautifulSoup(urlopen(url), \"lxml\")\n except Exception:\n print('Exception : ' + str(Exception))\n pass\n division = soup.find(\"div\", {\"class\": \"list-pager\"})\n print('Get numpage ' + url)\n if division is None:\n url_and_numpage.update({url: max_num_page})\n else:\n anchors = division.find_all('a')\n for anchor in anchors:\n if int(anchor.get('href')[-1]) > max_num_page:\n max_num_page = int(anchor.get('href')[-1])\n url_and_numpage.update({url: max_num_page})\n\n # =============================================================================\n # Pass all URLs of phones and tablet in Tiki.vn into .txt file\n # =============================================================================\n\n for url, num_pages in url_and_numpage.items():\n link_counts = 0\n for page in range(1, num_pages + 1):\n url_with_pages = url + '&page=' + str(page)\n print('... Crawling ' + url_with_pages)\n try:\n soup = BeautifulSoup(urlopen(url_with_pages), \"lxml\")\n except Exception:\n print('Exception : ' + str(Exception))\n pass\n division = soup.find(\"div\", {\"class\": \"product-box-list\",\n \"data-impress-list-title\": \"Category | Điện Thoại - Máy Tính Bảng\"})\n anchors = division.find_all('a')\n for anchor in anchors:\n href = anchor.get('href')\n if '?' 
in href:\n href = href.split('?')[:-1][0]\n self.links[self.list_brand[brand_counts], link_counts] = href\n self.total_links += 1\n link_counts += 1\n brand_counts += 1\n\n print(\"\"\"---------------------------------\n Stop crawling product URLs in tiki.vn!\n ---------------------------------\"\"\")\n\n def getProductUrlAdayroi(self):\n brand_counts = 0\n list_phones_in_web = [\"iphone\", \"samsung\", \"oppo\", \"nokia\", \"asus\", \"sony\", \"xiaomi\"]\n list_tablets_in_web = [\"apple\", \"samsung\", \"xiaomi\"]\n brand_urls = list()\n\n url = 'https://www.adayroi.com/dien-thoai-di-dong-c323'\n try:\n soup = BeautifulSoup(urlopen(url), \"lxml\")\n except Exception:\n print('Exception : ' + str(Exception))\n pass\n ul_category = soup.find(\"ul\", {\"data-role\": \"listview\", \"class\": \"category-menu child-level-3\"})\n phones_anchors = ul_category.find_all('a')\n\n for brand in list_phones_in_web:\n for anchor in phones_anchors:\n if brand in anchor.get('href'):\n crawl_url = 'https://www.adayroi.com' + anchor.get('href')\n brand_urls.append(crawl_url)\n\n url_and_numpage = dict()\n for url in brand_urls:\n max_num_pages = 0\n try:\n soup = BeautifulSoup(urlopen(url), \"lxml\")\n except Exception:\n print('Exception : ' + str(Exception))\n pass\n nav = soup.find(\"nav\", {\"class\": \"Page navigation\"})\n print('Get numpage ' + url)\n if nav is None:\n url_and_numpage.update({url: max_num_pages})\n else:\n anchors = nav.find_all('a')\n if int(anchors[-3].get('href')[-1]) > max_num_pages:\n max_num_pages = int(anchors[-3].get('href')[-1])\n url_and_numpage.update({url: max_num_pages})\n\n # =============================================================================\n # Pass all URLs of phones and tablet in Adayroi into .txt file\n # =============================================================================\n for url, num_pages in url_and_numpage.items():\n link_counts = 0\n for page in range(0, num_pages + 1):\n url_with_page = url + '?q=%3Arelevance&page=' + str(page)\n print('... Crawling ' + url_with_page)\n try:\n soup = BeautifulSoup(urlopen(url_with_page), \"lxml\")\n except Exception:\n print('Exception : ' + str(Exception))\n pass\n division = soup.find(\"div\", {\"class\": \"product-list__container\"})\n anchors = division.find_all('a')\n for anchor in anchors:\n href = 'https://adayroi.com' + anchor.get('href')\n if '?' 
in href:\n href = href.split('?')[:-1][0]\n self.links[self.list_brand[brand_counts], link_counts] = href\n self.total_links += 1\n link_counts += 1\n brand_counts += 1\n\n print(\"\"\"---------------------------------\n Stop crawling product URLs in Adayroi.com!\n ---------------------------------\"\"\")\n\n def getProductUrlCellPhoneS(self):\n brand_counts = 0\n brand_urls = list()\n url_and_numpage = dict()\n\n for brand in self.list_brand:\n url = 'https://cellphones.com.vn/mobile/' + brand + '.html'\n brand_urls.append(url)\n\n # Create a Header to help the beautifulSoup can crawl the web page\n headers = {'User-Agent': 'User-Agent:Mozilla/5.0'}\n\n for url in brand_urls:\n max_num_page = 1\n\n data1 = urllib.request.Request(url, headers=headers)\n data = urllib.request.urlopen(data1).read()\n try:\n soup = BeautifulSoup(data, \"lxml\")\n except Exception:\n print('Exception : ' + str(Exception))\n\n division = soup.find(\"div\", {\"class\": \"pages\"})\n print('Get numpage ' + url)\n if division is None:\n url_and_numpage.update({url: max_num_page})\n else:\n anchors = division.find_all('a')\n for anchor in anchors:\n if \"javascript\" in anchor.get('href'):\n continue\n if int(anchor.get('href')[-1]) > max_num_page:\n max_num_page = int(anchor.get('href')[-1])\n url_and_numpage.update({url: max_num_page})\n\n # =============================================================================\n # Pass all URLs of phones and tablet in Cellphones.com.vn into .txt file\n # =============================================================================\n\n for url, num_pages in url_and_numpage.items():\n link_counts = 0\n for page in range(1, num_pages + 1):\n url_with_pages = url + '?p=' + str(page)\n print('... Crawling ' + url_with_pages)\n\n data1 = urllib.request.Request(url_with_pages, headers=headers)\n data = urllib.request.urlopen(data1).read()\n\n try:\n soup = BeautifulSoup(data, \"lxml\")\n except Exception:\n print('Exception : ' + str(Exception))\n pass\n division = soup.find(\"div\", {\"class\": \"products-container\"})\n anchors = division.find_all('a')\n for anchor in anchors:\n href = anchor.get('href')\n if '?' in href:\n href = href.split('?')[:-1][0]\n self.links[self.list_brand[brand_counts], link_counts] = href\n self.total_links += 1\n link_counts += 1\n brand_counts += 1\n\n print(\"\"\"---------------------------------\n Stop crawling product URLs in Cellphones.com.vn!\n ---------------------------------\"\"\")\n\n def getProductUrlTheGioiDiDong(self):\n brand_counts = 0\n list_phones_in_web = [\"apple-iphone\", \"samsung\", \"oppo\", \"nokia\", \"asus-zenfone\", \"sony\", \"xiaomi\"]\n list_tablets_in_web = [\"apple\", \"samsung\", \"xiaomi\"]\n brand_urls = list()\n standard_phone_list = list() # This list to store the phones without all the memory type\n headers = {'User-Agent': 'User-Agent:Mozilla/5.0'}\n\n for brand in list_phones_in_web:\n url = 'https://www.thegioididong.com/dtdd-' + brand + '#i:2'\n brand_urls.append(url)\n\n for url in brand_urls:\n data1 = urllib.request.Request(url, headers=headers)\n data = urllib.request.urlopen(data1).read()\n\n try:\n print(\"Open URL : \" + url)\n soup = BeautifulSoup(data, \"lxml\")\n except Exception:\n print('Exception : ' + str(Exception))\n pass\n product_ul = soup.find(\"ul\", {\"class\": \"homeproduct filter-cate\"})\n anchors = product_ul.find_all('a')\n for anchor in anchors:\n href = anchor.get('href')\n href = 'https://www.thegioididong.com' + href\n if '?' 
in href:\n                    href = href.split('?')[:-1][0]\n                standard_phone_list.append(href)\n\n        for url in standard_phone_list:\n            link_counts = 0\n            data1 = urllib.request.Request(url, headers=headers)\n            data = urllib.request.urlopen(data1).read()\n\n            try:\n                print(\"... Crawling \" + url)\n                soup = BeautifulSoup(data, \"lxml\")\n            except Exception:\n                print('Exception : ' + str(Exception))\n                pass\n\n            # get the status of phone\n            span_status = soup.find(\"span\", {\"class\": \"productstatus\"})\n            # Check if the status is none\n            if span_status is not None:\n                # If the product's status is 'Ngừng kinh doanh' (discontinued) then continue to the next url\n                if 'Ngừng kinh doanh'.lower() in span_status.get_text().lower(): continue\n            memmory_div = soup.find(\"span\", {\"class\": \"memory memory2 \"})\n            # Check if there are more than 1 memory type of this phone\n            if memmory_div is None:\n                print('... Crawling ' + url)\n                self.links[self.list_brand[brand_counts], link_counts] = url\n                self.total_links += 1\n                link_counts += 1\n            else:\n                anchors = memmory_div.find_all('a')\n                for anchor in anchors:\n                    # anchor is a Tag; concatenate its href attribute, not the Tag itself\n                    href = 'https://www.thegioididong.com' + anchor.get('href')\n                    print('... Crawling ' + href)\n                    self.links[self.list_brand[brand_counts], link_counts] = href\n                    self.total_links += 1\n                    link_counts += 1\n","repo_name":"concalahan/binggodeals-crawl-python","sub_path":"fixed_crawl_url/link_finder.py","file_name":"link_finder.py","file_ext":"py","file_size_in_byte":11960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2429032533","text":"pratica = []\nprova = []\nfaltas = []\nfor nota in range(4):\n    pratica.append(int(input('Enter the grade for practical test {}: '.format(nota+1))))\n    prova.append(int(input('Enter the grade for exam {}: '.format(nota+1))))\n    faltas.append(int(input('Enter the absences for term {}: '.format(nota+1))))\n\nsomaPratica = 0\nsomaProva = 0\nsomaFaltas = 0\nfor n in range(4):\n    somaPratica = somaPratica + pratica[n]\n    somaProva = somaProva + prova[n]\n    somaFaltas = somaFaltas + faltas[n]\nmediaPratica = somaPratica / 4\nmediaProva = somaProva / 4\nPercentualFreq = (1 - somaFaltas / 40 )*100\n\nprint('Practical  Exam   Overall Average')\nfor n in range(4):\n    print(f'  {pratica[n]}       {prova[n]}     =    {(pratica[n]+prova[n])/2}')\nprint('--------------------------------')\nmediaGeral = (mediaPratica+mediaProva)/2\nprint(f'  {mediaPratica}       {mediaProva}     =    {mediaGeral}')\nprint('')\nprint('Overall Attendance = {}%'.format(PercentualFreq))\nprint('')\nif mediaGeral >= 7 and PercentualFreq >= 75:\n    print('Passed')\nelif mediaGeral >= 5 and PercentualFreq >= 75:\n    print('Make-up exam')\nelse:\n    print('Failed')\n","repo_name":"Guilherme-full/C-digos-em-Python","sub_path":"GVetorCompleto.py","file_name":"GVetorCompleto.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28782193100","text":"import scrapy\nimport json\nfrom w3lib.url import add_or_replace_parameter\nclass MySpider(scrapy.Spider):\n    name = 'myspider'\n    start_urls = ['https://www.kaggle.com/datasets.json?sortBy=hottest&group=all&page=1']\n\n    def parse(self, response):\n        data = json.loads(response.body) \n        total_results = data['totalDatasetListItems']\n        page = 1\n        # figure out how many pages are there and loop through them.\n        for i in range(20, total_results, 20): # step 20 since we have 20 results per page\n            page += 1  # advance the page counter; previously it stayed at 1 for every request\n            url = add_or_replace_parameter(response.url, 'page', page)\n            yield scrapy.Request(url, self.parse_page)\n\n        # don't forget to parse first ␊
page as well!\n        yield from self.parse_page(response)\n\n    def parse_page(self, response):\n        data = json.loads(response.body) \n        # parse page data here\n        for item in data['datasetListItems']:\n            yield item\n","repo_name":"whatwehaveunlearned/sageBrain","sub_path":".ipynb_checkpoints/test-checkpoint.py","file_name":"test-checkpoint.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20450625478","text":"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport cross_vali\r\nimport data_preprocess as dp\r\n\r\n\r\ndef classify_data(data):\r\n    labels, n_labels = np.unique(data.iloc[:, -1], return_counts=True)\r\n    max_i = np.argmax(n_labels)\r\n    return len(labels), labels[max_i]\r\n\r\n# sample n features for calculating best split\r\ndef get_possible_splits(data, n_features):\r\n    splits = []\r\n    sample_columns = np.random.choice(data.shape[1], size=n_features, replace=False)\r\n    sample_data = data.iloc[:, sample_columns]\r\n    n_rows, n_columns = sample_data.shape\r\n    for i in range(n_columns - 1):\r\n        col = data.iloc[:,i]\r\n        splits.append(np.unique(col))\r\n    return splits\r\n\r\ndef split_data(data, column, value):\r\n    split_column = data.iloc[:, column]\r\n    left = data[split_column <= value]\r\n    right = data[split_column > value]\r\n    return left, right\r\n\r\ndef calculate_gini(left, right):\r\n    p_left = len(left) / (len(left) + len(right))\r\n    p_right = len(right) / (len(left) + len(right))\r\n    c1_left = 0\r\n    c0_left = 0\r\n    c1_right = 0\r\n    c0_right = 0\r\n    if (len(left) > 0):\r\n        c1_left = len(left[left.iloc[:, -1] == 1]) / len(left) \r\n        c0_left = len(left[left.iloc[:, -1] == 0]) / len(left)\r\n    gini_left = 1 - pow(c1_left, 2) - pow(c0_left, 2)\r\n    if (len(right) > 0):\r\n        c1_right = len(right[right.iloc[:, -1] == 1]) / len(right)\r\n        c0_right = len(right[right.iloc[:, -1] == 0]) / len(right)\r\n    gini_right = 1 - pow(c1_right, 2) - pow(c0_right, 2)\r\n    \r\n    gini = p_left * gini_left + p_right * gini_right\r\n\r\n    return gini\r\n    \r\ndef choose_split(data, possible_splits):\r\n    min_gini = 1\r\n    best_column = 0\r\n    best_value = 0\r\n    for i in range(len(possible_splits)):\r\n        col = possible_splits[i]\r\n        for val in col:\r\n            left, right = split_data(data, i, val)\r\n            gini = calculate_gini(left, right)\r\n            if gini < min_gini:\r\n                min_gini = gini\r\n                best_column = i\r\n                best_value = val\r\n    return best_column, best_value\r\n\r\ndef create_tree(data, n_features):\r\n    \r\n    # If all items in the dataset have the same classification, return the classification.\r\n    n_labels, classification = classify_data(data)\r\n    if (n_labels == 1):\r\n        return classification\r\n\r\n    # recursively generate subtrees\r\n    else:\r\n        possible_splits = get_possible_splits(data, n_features)\r\n        best_col, best_val = choose_split(data, possible_splits)\r\n        question = \"{} <= {}\".format(best_col, best_val)\r\n        tree = {}\r\n        tree[question] = []\r\n        data_left, data_right = split_data(data, best_col, best_val)\r\n        tree_left = create_tree(data_left, n_features)\r\n        tree_right = create_tree(data_right, n_features)\r\n        tree[question].append(tree_left)\r\n        tree[question].append(tree_right)\r\n        return tree\r\n    \r\ndef predict(row, tree):\r\n    question = list(tree.keys())[0]\r\n    attr, operator, val = question.split(\" \")\r\n    if (row[int(attr)] <= float(val)):\r\n        subtree = tree[question][0]\r\n    else:\r\n        subtree = tree[question][1]\r\n    if not isinstance(subtree, dict):\r\n        return subtree\r\n    ␊
else:\r\n        return predict(row, subtree)\r\n    \r\ndef classify_dt(x, y, test, n_features):\r\n    train = np.concatenate((np.array(x),np.array([y]).T), axis=1)\r\n    train = pd.DataFrame(train)\r\n    test = pd.DataFrame(test)\r\n    test['label'] = 0 # here I fill the label column with dummy values to make its dimension the same as the training set\r\n    sample_indices = np.random.choice(len(x), size=len(x), replace=True)\r\n    sample_train = train.iloc[sample_indices,:]\r\n    dt = create_tree(sample_train, n_features)  # train on the bootstrap sample, not the full training set\r\n    # print(\"decision tree: \", dt)\r\n    # print(test.apply(lambda x: predict(x, dt), axis=1))\r\n    return test.apply(lambda x: predict(x, dt), axis=1)\r\n\r\n    # todo: max_depth - regularization\r\n\r\ndef random_forest(x, y, test, parameter):\r\n    n_trees, ratio = parameter\r\n    n_features = round(x.shape[1] * ratio)\r\n    prediction = pd.DataFrame()\r\n    for i in range(n_trees):\r\n        # classify_dt draws its own bootstrap sample (with replacement) for each tree\r\n        prediction[i] = classify_dt(x, y, test, n_features)\r\n\r\n    # majority vote across the trees rather than the column-wise maximum\r\n    return prediction.mode(axis=1)[0]\r\n    \r\nif __name__ == \"__main__\":\r\n    x, y = dp.data_read(\"project3_dataset1.txt\")\r\n    cross_vali.cross_validation(x, y, random_forest, (5, 0.5))","repo_name":"menglulul/Classification","sub_path":"random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10400191080","text":"import io\nimport json\nimport os\nimport string\nfrom secrets import choice\n\nimport pytest\nimport yaml\nfrom oidcmsg.message import Message\nfrom oidcmsg.oauth2 import AuthorizationErrorResponse\nfrom oidcmsg.oidc import AccessTokenRequest\nfrom oidcmsg.oidc import AuthorizationRequest\nfrom oidcmsg.oidc import AuthorizationResponse\nfrom oidcmsg.oidc import TokenErrorResponse\n\nfrom oidcendpoint.cookie import CookieDealer\nfrom oidcendpoint.endpoint_context import EndpointContext\nfrom oidcendpoint.id_token import IDToken\nfrom oidcendpoint.oidc.add_on.pkce import CC_METHOD\nfrom oidcendpoint.oidc.authorization import Authorization\nfrom oidcendpoint.oidc.token import Token\n\nBASECH = string.ascii_letters + string.digits + \"-._~\"\n\nKEYDEFS = [\n    {\"type\": \"RSA\", \"key\": \"\", \"use\": [\"sig\"]}\n    # {\"type\": \"EC\", \"crv\": \"P-256\", \"use\": [\"sig\"]}\n]\n\nRESPONSE_TYPES_SUPPORTED = [\n    [\"code\"],\n    [\"token\"],\n    [\"id_token\"],\n    [\"code\", \"token\"],\n    [\"code\", \"id_token\"],\n    [\"id_token\", \"token\"],\n    [\"code\", \"token\", \"id_token\"],\n    [\"none\"],\n]\n\nCAPABILITIES = {\n    \"subject_types_supported\": [\"public\", \"pairwise\"],\n    \"grant_types_supported\": [\n        \"authorization_code\",\n        \"implicit\",\n        \"urn:ietf:params:oauth:grant-type:jwt-bearer\",\n        \"refresh_token\",\n    ],\n}\n\nCLAIMS = {\"id_token\": {\"given_name\": {\"essential\": True}, \"nickname\": None}}\n\nAUTH_REQ = AuthorizationRequest(\n    client_id=\"client_1\",\n    redirect_uri=\"https://example.com/cb\",\n    scope=[\"openid\"],\n    state=\"STATE\",\n    response_type=\"code\",\n)\n\nTOKEN_REQ = AccessTokenRequest(\n    client_id=\"client_1\",\n    redirect_uri=\"https://example.com/cb\",\n    state=\"STATE\",\n    grant_type=\"authorization_code\",\n    client_secret=\"hemligt\",\n)\n\nAUTH_REQ_DICT = AUTH_REQ.to_dict()\n\nBASEDIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef full_path(local_file):\n    return os.path.join(BASEDIR, local_file)\n\n\nUSERINFO_db = ␊
json.loads(open(full_path(\"users.json\")).read())\n\nclient_yaml = \"\"\"\noidc_clients:\n client_1:\n \"client_secret\": 'hemligt'\n \"redirect_uris\":\n - ['https://example.com/cb', '']\n \"client_salt\": \"salted\"\n 'token_endpoint_auth_method': 'client_secret_post'\n 'response_types':\n - 'code'\n - 'token'\n - 'code id_token'\n - 'id_token'\n - 'code id_token token'\n client2:\n client_secret: \"spraket\"\n redirect_uris:\n - ['https://app1.example.net/foo', '']\n - ['https://app2.example.net/bar', '']\n response_types:\n - code\n client3:\n client_secret: '2222222222222222222222222222222222222222'\n redirect_uris:\n - ['https://127.0.0.1:8090/authz_cb/bobcat', '']\n post_logout_redirect_uris:\n - ['https://openidconnect.net/', '']\n response_types:\n - code\n\"\"\"\n\n\n@pytest.fixture\ndef conf():\n return {\n \"issuer\": \"https://example.com/\",\n \"password\": \"mycket hemligt zebra\",\n \"token_expires_in\": 600,\n \"grant_expires_in\": 300,\n \"refresh_token_expires_in\": 86400,\n \"verify_ssl\": False,\n \"capabilities\": CAPABILITIES,\n \"keys\": {\"uri_path\": \"static/jwks.json\", \"key_defs\": KEYDEFS},\n \"id_token\": {\n \"class\": IDToken,\n \"kwargs\": {\n \"available_claims\": {\n \"email\": {\"essential\": True},\n \"email_verified\": {\"essential\": True},\n }\n },\n },\n \"endpoint\": {\n \"authorization\": {\n \"path\": \"{}/authorization\",\n \"class\": Authorization,\n \"kwargs\": {},\n },\n \"token\": {\n \"path\": \"{}/token\",\n \"class\": Token,\n \"kwargs\": {\n \"client_authn_method\": [\n \"client_secret_post\",\n \"client_secret_basic\",\n \"client_secret_jwt\",\n \"private_key_jwt\",\n ]\n },\n },\n },\n \"authentication\": {\n \"anon\": {\n \"acr\": \"http://www.swamid.se/policy/assurance/al1\",\n \"class\": \"oidcendpoint.user_authn.user.NoAuthn\",\n \"kwargs\": {\"user\": \"diana\"},\n }\n },\n \"template_dir\": \"template\",\n \"add_on\": {\n \"pkce\": {\n \"function\": \"oidcendpoint.oidc.add_on.pkce.add_pkce_support\",\n \"kwargs\": {\"essential\": True},\n }\n },\n \"cookie_dealer\": {\n \"class\": CookieDealer,\n \"kwargs\": {\n \"sign_key\": \"ghsNKDDLshZTPn974nOsIGhedULrsqnsGoBFBLwUKuJhE2ch\",\n \"default_values\": {\n \"name\": \"oidcop\",\n \"domain\": \"127.0.0.1\",\n \"path\": \"/\",\n \"max_age\": 3600,\n },\n },\n },\n }\n\n\ndef unreserved(size=64):\n return \"\".join(choice(BASECH) for _ in range(size))\n\n\ndef _code_challenge():\n \"\"\"\n PKCE aka RFC 7636\n \"\"\"\n # code_verifier: string of length cv_len\n code_verifier = unreserved(64)\n\n _method = \"S256\"\n\n # Pick hash method\n _hash_method = CC_METHOD[_method]\n # base64 encode the hash value\n code_challenge = _hash_method(code_verifier)\n\n return {\n \"code_challenge\": code_challenge,\n \"code_challenge_method\": _method,\n \"code_verifier\": code_verifier,\n }\n\n\ndef create_endpoint(config):\n endpoint_context = EndpointContext(config)\n _clients = yaml.safe_load(io.StringIO(client_yaml))\n endpoint_context.cdb = _clients[\"oidc_clients\"]\n endpoint_context.keyjar.import_jwks(\n endpoint_context.keyjar.export_jwks(True, \"\"), config[\"issuer\"]\n )\n return endpoint_context\n\n\nclass TestEndpoint(object):\n @pytest.fixture(autouse=True)\n def create_endpoint(self, conf):\n endpoint_context = create_endpoint(conf)\n self.session_manager = endpoint_context.session_manager\n self.authn_endpoint = endpoint_context.endpoint[\"authorization\"]\n self.token_endpoint = endpoint_context.endpoint[\"token\"]\n\n def test_unsupported_code_challenge_methods(self, conf):\n 
conf[\"add_on\"][\"pkce\"][\"kwargs\"][\"code_challenge_methods\"] = [\"dada\"]\n\n with pytest.raises(ValueError) as exc:\n create_endpoint(conf)\n\n assert exc.value.args[0] == \"Unsupported method: dada\"\n\n def test_parse(self):\n _cc_info = _code_challenge()\n _authn_req = AUTH_REQ.copy()\n _authn_req[\"code_challenge\"] = _cc_info[\"code_challenge\"]\n _authn_req[\"code_challenge_method\"] = _cc_info[\"code_challenge_method\"]\n\n _pr_resp = self.authn_endpoint.parse_request(_authn_req.to_dict())\n resp = self.authn_endpoint.process_request(_pr_resp)\n\n assert isinstance(resp[\"response_args\"], AuthorizationResponse)\n\n _token_request = TOKEN_REQ.copy()\n _token_request[\"code\"] = resp[\"response_args\"][\"code\"]\n _token_request[\"code_verifier\"] = _cc_info[\"code_verifier\"]\n _req = self.token_endpoint.parse_request(_token_request)\n\n assert isinstance(_req, Message)\n\n def test_no_code_challenge_method(self):\n _cc_info = _code_challenge()\n _authn_req = AUTH_REQ.copy()\n _authn_req[\"code_challenge\"] = _cc_info[\"code_challenge\"]\n\n _pr_resp = self.authn_endpoint.parse_request(_authn_req.to_dict())\n resp = self.authn_endpoint.process_request(_pr_resp)\n\n assert isinstance(resp[\"response_args\"], AuthorizationResponse)\n\n session_info = self.session_manager.get_session_info_by_token(\n resp[\"response_args\"][\"code\"],\n grant=True)\n\n session_info[\"grant\"].authorization_request[\"code_challenge_method\"] = \"plain\"\n\n _token_request = TOKEN_REQ.copy()\n _token_request[\"code\"] = resp[\"response_args\"][\"code\"]\n _token_request[\"code_verifier\"] = _cc_info[\"code_challenge\"]\n _req = self.token_endpoint.parse_request(_token_request)\n\n assert isinstance(_req, Message)\n\n def test_no_code_challenge(self):\n _authn_req = AUTH_REQ.copy()\n\n _pr_resp = self.authn_endpoint.parse_request(_authn_req.to_dict())\n\n assert isinstance(_pr_resp, AuthorizationErrorResponse)\n assert _pr_resp[\"error\"] == \"invalid_request\"\n assert _pr_resp[\"error_description\"] == \"Missing required code_challenge\"\n\n def test_not_essential(self, conf):\n conf[\"add_on\"][\"pkce\"][\"kwargs\"][\"essential\"] = False\n endpoint_context = create_endpoint(conf)\n authn_endpoint = endpoint_context.endpoint[\"authorization\"]\n token_endpoint = endpoint_context.endpoint[\"token\"]\n _authn_req = AUTH_REQ.copy()\n\n _pr_resp = authn_endpoint.parse_request(_authn_req.to_dict())\n resp = authn_endpoint.process_request(_pr_resp)\n\n assert isinstance(resp[\"response_args\"], AuthorizationResponse)\n\n _token_request = TOKEN_REQ.copy()\n _token_request[\"code\"] = resp[\"response_args\"][\"code\"]\n _req = token_endpoint.parse_request(_token_request)\n\n assert isinstance(_req, Message)\n\n def test_unknown_code_challenge_method(self):\n _authn_req = AUTH_REQ.copy()\n _authn_req[\"code_challenge\"] = \"aba\"\n _authn_req[\"code_challenge_method\"] = \"doupa\"\n\n _pr_resp = self.authn_endpoint.parse_request(_authn_req.to_dict())\n\n assert isinstance(_pr_resp, AuthorizationErrorResponse)\n assert _pr_resp[\"error\"] == \"invalid_request\"\n assert _pr_resp[\n \"error_description\"\n ] == \"Unsupported code_challenge_method={}\".format(\n _authn_req[\"code_challenge_method\"]\n )\n\n def test_unsupported_code_challenge_method(self, conf):\n conf[\"add_on\"][\"pkce\"][\"kwargs\"][\"code_challenge_methods\"] = [\"plain\"]\n endpoint_context = create_endpoint(conf)\n authn_endpoint = endpoint_context.endpoint[\"authorization\"]\n\n _cc_info = _code_challenge()\n _authn_req = 
AUTH_REQ.copy()\n _authn_req[\"code_challenge\"] = _cc_info[\"code_challenge\"]\n _authn_req[\"code_challenge_method\"] = _cc_info[\"code_challenge_method\"]\n\n _pr_resp = authn_endpoint.parse_request(_authn_req.to_dict())\n\n assert isinstance(_pr_resp, AuthorizationErrorResponse)\n assert _pr_resp[\"error\"] == \"invalid_request\"\n assert _pr_resp[\n \"error_description\"\n ] == \"Unsupported code_challenge_method={}\".format(\n _authn_req[\"code_challenge_method\"]\n )\n\n def test_wrong_code_verifier(self):\n _cc_info = _code_challenge()\n _authn_req = AUTH_REQ.copy()\n _authn_req[\"code_challenge\"] = _cc_info[\"code_challenge\"]\n _authn_req[\"code_challenge_method\"] = _cc_info[\"code_challenge_method\"]\n\n _pr_resp = self.authn_endpoint.parse_request(_authn_req.to_dict())\n resp = self.authn_endpoint.process_request(_pr_resp)\n\n _token_request = TOKEN_REQ.copy()\n _token_request[\"code\"] = resp[\"response_args\"][\"code\"]\n _token_request[\"code_verifier\"] = \"aba\"\n resp = self.token_endpoint.parse_request(_token_request)\n\n assert isinstance(resp, TokenErrorResponse)\n assert resp[\"error\"] == \"invalid_grant\"\n assert resp[\"error_description\"] == \"PKCE check failed\"\n\n def test_no_code_verifier(self):\n _cc_info = _code_challenge()\n _authn_req = AUTH_REQ.copy()\n _authn_req[\"code_challenge\"] = _cc_info[\"code_challenge\"]\n _authn_req[\"code_challenge_method\"] = _cc_info[\"code_challenge_method\"]\n\n _pr_resp = self.authn_endpoint.parse_request(_authn_req.to_dict())\n resp = self.authn_endpoint.process_request(_pr_resp)\n\n _token_request = TOKEN_REQ.copy()\n _token_request[\"code\"] = resp[\"response_args\"][\"code\"]\n resp = self.token_endpoint.parse_request(_token_request)\n\n assert isinstance(resp, TokenErrorResponse)\n assert resp[\"error\"] == \"invalid_grant\"\n assert resp[\"error_description\"] == \"Missing code_verifier\"\n","repo_name":"IdentityPython/oidcendpoint","sub_path":"tests/test_33_pkce.py","file_name":"test_33_pkce.py","file_ext":"py","file_size_in_byte":12049,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"7569762321","text":"from cProfile import label\nfrom cgitb import text\nimport tkinter as tk\nfrom tkinter import END, font\nfrom tkinter import messagebox\nfrom turtle import clear, up\nimport mysql.connector\nfrom numpy import place\nfrom time import sleep\nfrom tkinter import ttk\n\n\n\nroot=tk.Tk()\nroot.geometry(\"800x500\")\nroot.title(\"Race Builder\")\nl1=tk.Label(root,text=\"Enter Data\", font=(\"times\",14,\"bold\",\"underline\"))\nl1.place(x=300, y=5)\n#form\nl2=tk.Label(root,text=\"Full Name:\")\nl2.place(x=10,y=50)\nt1=tk.Entry(root)\nt1.place(x=125,y=50)\n#name\nl3=tk.Label(root,text=\"Runner SID\")\nl3.place(x=10,y=85)\nt2=tk.Entry(root)\nt2.place(x=125,y=85)\n#place\nl4=tk.Label(root,text=\"Place\")\nl4.place(x=10,y=120)\nt3=tk.Entry(root)\nt3.place(x=125,y=120)\n\n\n \n\ndef add():\n \n ruid1=t2.get()\n rname1=t1.get()\n rplace1=t3.get()\n\n \n mydb=mysql.connector.connect(\n host=\"localhost\",\n database=\"RB3\",\n user=\"root\",\n password=\"tyu@3434\"\n \n )\n print(\"Connection Opened\")\n cursor=mydb.cursor()\n \n\n sql=(\"insert into runners (ruid,rname,rplace) values (%s, %s,%s) \")\n val=(ruid1,rname1,rplace1)\n cursor.execute(sql,val)\n \n print(\"Values Registrated\")\n \n mydb.commit()\n \n mydb.close()\n print(\"Connection Closed\")\n cleartable()\n show()\ndef searchname():\n\n \n try: \n rname1=t1.get() \n mydb=mysql.connector.connect(\n 
host=\"localhost\",\n            database=\"RB3\",\n            user=\"root\",\n            password=\"tyu@3434\"\n            )\n        cursor=mydb.cursor()\n        sql=(\"select rname,ruid,rplace from runners where rname=%s\") \n        val=(rname1,)\n        cursor.execute(sql,val)\n        t1.delete(0,END)\n        t2.delete(0,END)\n        t3.delete(0,END)\n        records=cursor.fetchall()\n        t1.insert(0,records[0][0])\n        t2.insert(0,records[0][1])\n        t3.insert(0,records[0][2])\n        print(records)\n    except:\n        messagebox.showinfo(\"Information\",\"Name Is Not Located In Database\") \ndef searchplace():\n    try: \n        rplace1=t3.get() \n        mydb=mysql.connector.connect(\n            host=\"localhost\",\n            database=\"RB3\",\n            user=\"root\",\n            password=\"tyu@3434\"\n            )\n        cursor=mydb.cursor()\n        sql=(\"select rname,ruid,rplace from runners where rplace=%s\") \n        val=(rplace1,)\n        cursor.execute(sql,val)\n        t1.delete(0,END)\n        t2.delete(0,END)\n        t3.delete(0,END)\n        records=cursor.fetchall()\n        t1.insert(0,records[0][0])\n        t2.insert(0,records[0][1])\n        t3.insert(0,records[0][2])\n        print(records)\n    except:\n        messagebox.showinfo(\"Information\",\"Place Is Not Located In Database\") \ndef update():\n    rname1=t1.get()\n    ruid1=t2.get()\n    rplace1=t3.get()\n    mydb=mysql.connector.connect(\n        host=\"localhost\",\n        database=\"RB3\",\n        user=\"root\",\n        password=\"tyu@3434\"\n        )\n    print(\"Connection Opened\")\n    cursor=mydb.cursor()\n    sql=(\"update runners set rname=%s,rplace=%s where ruid=%s \")\n    val=(rname1,rplace1,ruid1)\n    cursor.execute(sql,val)\n    mydb.commit()\n    messagebox.showinfo(\"Information\",\"Records Updated Successfully\")\n    cleartable()\n    show()\ndef clearscreen():\n    t1.delete(0,END)\n    t2.delete(0,END)\n    t3.delete(0,END)\ndef show():\n    try: \n        mydb=mysql.connector.connect(\n            host=\"localhost\",\n            database=\"RB3\",\n            user=\"root\",\n            password=\"tyu@3434\"\n            )\n        cursor=mydb.cursor()\n        sql=(\"select ruid,rname,rplace from runners \") \n        cursor.execute(sql)\n        records=cursor.fetchall()\n        print(records)\n        for i,(ruid,rname,rplace) in enumerate(records,start=1):\n            list1.insert(\"\",\"end\",values=(ruid,rname,rplace))\n        mydb.close()\n    except:\n        messagebox.showinfo(\"Information\",\"Could Not Load Records From Database\")\ndef cleartable():\n    for item in list1.get_children():\n        list1.delete(item)\ndef Value1(event):\n    t1.delete(0, END)\n    t2.delete(0, END)\n    t3.delete(0, END)\n    rowsid=list1.selection()[0]\n    select=list1.set(rowsid)\n    # t1 holds the runner name and t2 the runner id, matching the entry labels\n    t1.insert(0,select[\"rname\"])\n    t2.insert(0,select[\"ruid\"])\n    t3.insert(0,select[\"rplace\"])\ndef delete():\n    # the runner id is entered in t2, not t1\n    ruid1=t2.get()\n    mydb=mysql.connector.connect(\n        host=\"localhost\",\n        database=\"RB3\",\n        user=\"root\",\n        password=\"tyu@3434\"\n        )\n    print(\"Connection Opened\")\n    cursor=mydb.cursor()\n    sql=(\"DELETE FROM runners WHERE ruid=%s\")\n    val=(ruid1,)\n    cursor.execute(sql,val)\n    mydb.commit()\n    messagebox.showinfo(\"Information\",\"Records Deleted Successfully\")\n    cleartable()\n    show()\n\n#buttons\nb1=tk.Button(root,text=\"Add\", command=add)\nb1.place(x=500,y=50)\nb5=tk.Button(root,text=\"Search By Place\",command=searchplace)\nb5.place(x=500,y=100)\nb6=tk.Button(root,text=\"Clear Data Entry\",command=clearscreen)\nb6.place(x=500,y=200)\nb7=tk.Button(root,text=\"Update Via RUID\", command=update)\nb7.place(x=500,y=250)\nb4=tk.Button(root,text=\"Search By Name\",command=searchname)\nb4.place(x=500,y=300)\n\n#MYSQL\ncols=(\"ruid\",\"rname\",\"rplace\")\nlist1=ttk.Treeview(root,columns=cols,show=\"headings\")\nfor coll in cols:\n    list1.heading(coll,text=coll)\nlist1.place(x=10,y=250)\n\nshow()\nlist1.bind('<<TreeviewSelect>>',Value1)\n\nroot.mainloop()","repo_name":"interestingstuf/Code-2","sub_path":"RaceBuilderV/Race Builder 3.0/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28765657765","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 13 22:41:55 2018\n\n@author: USER\n\"\"\"\n#%%\nimport requests as rq\nfrom bs4 import BeautifulSoup\nimport lxml\n\nitem =[]\nfor i in range(1,5):\n    url = \"https://search.shopping.naver.com/search/all.nhn?origQuery={}&pagingIndex={}&pagingSize=40&viewType=list&sort=rel&frm=NVSHPAG&query={}\".format(\"치즈\",i,\"치즈\")\n    item.append(url)\n\nprint(item)\n#res = rq.get(url_bundle)\n#res.url\n#html = res.text\n#soup = BeautifulSoup(html, 'lxml')\nlen(item)\n\n#%%\nsoup_html=[]\nnum_item=len(item)\n\nfor i in range(0, num_item):\n    res = rq.get(item[i])\n    soup = BeautifulSoup(res.content, 'lxml')\n    soup_item= soup.find_all(\"li\", class_=\"_itemSection\")\n    soup_html.append(soup_item)\n\n# items per result page (taken from the last fetched page)\nnum=len(soup_item)\n#%%\n\n\ntitle = []\nprice_sales = []\nreview_num = []\ncompany = []\n\nfor k in range(0, num_item):\n    res = rq.get(item[k])\n    soup = BeautifulSoup(res.content, 'lxml')\n    soup_item = soup.find_all(\"li\", class_=\"_itemSection\")\n    for i in range(0, num):\n        # product name\n        soup_title = soup_item[i].find(\"div\",class_=\"info\")\n        if soup_title is not None:\n            title_txt = soup_title.a.text\n            print(title_txt)\n            title.append(title_txt)\n        else:\n            title.append(\"\")\n        \n        # product price (with discount applied)\n        soup_price = soup_item[i].find(\"span\", class_=\"num\")\n        if soup_price is not None:\n            price_txt = soup_price.text\n            price_sales.append(price_txt)\n        else:\n            price_sales.append(\"\")\n        print(price_sales[i])\n        \n        # reviews\n        soup_review = soup_item[i].find(\"a\", class_=\"graph\")\n        if soup_review is not None:\n            review_num_txt = soup_review.em.text\n            review_num.append(review_num_txt)\n        else:\n            review_num.append(\"\")\n        print(review_num[i])\n        \n        # company (mall) name\n        soup_multi = soup_item[i].find(\"a\", class_=\"btn_compare\")\n        \n        if soup_multi is not None:\n            soup_company1 = soup_item[i].find(\"span\", class_=\"mall_name\")\n            soup_company_txt = soup_company1.text\n            company.append(soup_company_txt)\n        else : \n            soup_company2 = soup_item[i].find(\"P\", class_=\"mall_text\")\n            soup_company3 = soup_item[i].find(\"p\", class_=\"mall_txt\")\n            if soup_company2 is not None:\n                soup_company_txt = soup_company2.a.text\n                company.append(soup_company_txt)\n            elif soup_company3 is not None:\n                soup_company_txt = soup_company3.a.text\n                if soup_company_txt:\n                    company.append(soup_company_txt)\n                else :\n                    soup_company_txt = soup_company3.a.img['alt']\n                    company.append(soup_company_txt)\n            else :\n                company.append(\"\")\n        print(company[i])\n#%%\nimport pandas as pd\n\nprint(title, len(title))\nprint(price_sales, len(price_sales))\nprint(review_num, len(review_num))\nprint(company, len(company))\n\ntitle1 = pd.Series(title)\nprice_sales1 = pd.Series(price_sales)\nreview_num1 = pd.Series(review_num)\ncompany_name = pd.Series(company) \n\ndat = pd.DataFrame({ \"title\" : title , \n                     \"price_sales\" : price_sales, \n                     \"review_num\" : review_num,\n                     \"company_name\" : company_name }, 
columns=['title','price_sales','review_num', \"company_name\"] )\ndat.info() \n#%% database connection\nimport mysql.connector\nmydb = mysql.connector.connect(\n  host =\"localhost\",\n  user =\"root\",\n  passwd =\"qwer1234\"\n)\n\nprint(mydb)\n\nmydb = mysql.connector.connect(\n  host =\"localhost\",\n  user =\"root\",\n  passwd =\"qwer1234\",\n  database=\"mydatabase\"\n)\n\nprint(mydb)\n\nmycursor = mydb.cursor()\n\n#%% database schema design\nmycursor.execute(\"CREATE TABLE naver (id INT PRIMARY KEY AUTO_INCREMENT, title VARCHAR(64), price_sales VARCHAR(64), review_num VARCHAR(64), company_name VARCHAR(255))\")\n\n#%% insert data into the database\nmycursor = mydb.cursor()\ndat_list=dat.values.tolist()\n# insert into the naver table created above, one row per scraped item\nfor i in range(0, len(dat_list)):\n    sql = \"INSERT INTO naver ( title , price_sales , review_num , company_name ) VALUES (%s,%s,%s,%s)\"\n    val = (dat_list[i])\n    mycursor.execute(sql, val)\nmydb.commit()\n","repo_name":"namu2018/crawling","sub_path":"과제_데이터 수집/naver_shopping_homework.py","file_name":"naver_shopping_homework.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72757468168","text":"M, N = map(int, input().split(\" \"))\nlista = []\n\nwhile M>0 and N>0:\n    if(M= yr\r\n    OblData[(yr-2021+(pd.to_datetime(payday) - pd.Timestamp(today))/datetime.timedelta(days=365))] = OblData[\"Kupon\"]*alive\r\n    OblData[(yr-2021+(pd.to_datetime(payday) - pd.Timestamp(today))/datetime.timedelta(days=365))] += (pd.DatetimeIndex(OblData[\"Udløbsdato\"]).year == yr) *100 #OblData[\"Åbningskurs\"]\r\nOblData = OblData.sort_values(by=[\"Maturity\"])\r\n\r\n\r\n\r\nexcelBook = openpyxl.load_workbook('C:\\\\Users\\\\Jonas\\\\OneDrive\\\\UNI\\\\IFE\\\\Yields.xlsx')\r\nwith pd.ExcelWriter('C:\\\\Users\\\\Jonas\\\\OneDrive\\\\UNI\\\\IFE\\\\Yields.xlsx', engine=\"openpyxl\") as writer:\r\n    writer.book = excelBook\r\n    writer.sheets = dict((ws.title, ws) for ws in excelBook.worksheets)\r\n    OblData.to_excel(writer, sheet_name=\"Data\")\r\n    writer.save()\r\n","repo_name":"EdinMahmutovic/IntroToFE","sub_path":"EXCEL_week38/uge38.py","file_name":"uge38.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28991251920","text":"import random\nfrom tkinter import *\nimport pandas\n\nBACKGROUND_COLOR = \"#B1DDC6\"\n\ndata = pandas.read_csv(\"data/english_words.csv\")\n# word_list = data.to_dict(orient=\"records\")\ncurrent_word = {}\nto_learn = {}\n\ntry:\n    data = pandas.read_csv(\"data/words_to_learn.csv\")\nexcept FileNotFoundError:\n    original_data = pandas.read_csv(\"data/english_words.csv\")\n    word_list = original_data.to_dict(orient=\"records\")\nelse:\n    word_list = data.to_dict(orient=\"records\")\n\n\ndef next_card():\n    global current_word\n    current_word = random.choice(word_list)\n    canvas.itemconfig(card_title, text=\"English\", fill=\"black\")\n    canvas.itemconfig(card_word, text=current_word[\"English\"], fill=\"black\")\n    canvas.itemconfig(card_background, image=front_image)\n    windows.after(4000, func=hindi_card)\n\n\ndef hindi_card():\n    global current_word\n    canvas.itemconfig(card_title, text=\"Hindi\", fill=\"white\")\n    canvas.itemconfig(card_word, text=current_word[\"Hindi\"], fill=\"white\")\n    canvas.itemconfig(card_background, image=back_image)\n    windows.after(4000, func=next_card)\n\n\ndef remove_word():\n    global current_word\n    word_list.remove(current_word)\n    data = pandas.DataFrame(word_list)\n    data.to_csv(\"data/words_to_learn.csv\", 
index=False)\n words_left.config(text=f\"Words Remaining : {len(word_list)}\")\n\n\nwindows = Tk()\nwindows.title(\"Flashy\")\nwindows.config(bg=BACKGROUND_COLOR, pady=20, padx=20)\n\n# windows.after(5000, func=hindi_card)\n\nfront_image = PhotoImage(file=\"images/card_front.png\")\nback_image = PhotoImage(file=\"images/card_back.png\")\ncanvas = Canvas(height=526, width=800, highlightthickness=0, bg=BACKGROUND_COLOR)\ncard_background = canvas.create_image(400, 263, image=front_image)\ncard_title = canvas.create_text(400, 150, text=\"\", font=(\"ariel\", 50, \"italic\"))\ncard_word = canvas.create_text(400, 263, text=\"\", font=(\"ariel\", 60, \"bold\"))\ncanvas.grid(row=1, column=1)\n\nright_image = PhotoImage(file=\"images/right.png\")\nright = Button(image=right_image, highlightthickness=0, command=remove_word)\nright.grid(row=2, column=1)\n\ninformation = Label()\ninformation.config(text=\"Press ✔ if you know the word, It won't be repeated again\")\ninformation.config(background=BACKGROUND_COLOR, font=(\"ariel\", 15, \"normal\"), fg=\"sea green\", pady=10)\ninformation.grid(row=3, column=1)\n\nwords_left = Label()\nwords_left.config(text=f\"Words Remaining : {len(word_list)}\")\nwords_left.config(background=BACKGROUND_COLOR, font=(\"ariel\", 15, \"normal\"), fg=\"sea green\", pady=10)\nwords_left.grid(row=0, column=1)\n\nnext_card()\n\nwindows.mainloop()\n\n\n","repo_name":"SHIVAM-MANDHAN/Learning-English-by-flash-cards","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22839676344","text":"class Solution:\n def canChange(self, start: str, target: str) -> bool:\n s1 = []\n s2 = []\n for i in range(len(start)):\n if start[i] != '_':\n s1.append([start[i], i])\n if target[i] != '_':\n s2.append([target[i], i])\n \n if len(s1) != len(s2):\n return False\n \n for i in range(len(s1)):\n if s1[i][0] != s2[i][0]:\n return False\n if s1[i][0] == 'L' and s1[i][1] < s2[i][1]:\n return False\n if s1[i][0] == 'R' and s1[i][1] > s2[i][1]:\n return False\n return True","repo_name":"Aayush65/LeetCodeSolutions","sub_path":"2337-move-pieces-to-obtain-a-string/2337-move-pieces-to-obtain-a-string.py","file_name":"2337-move-pieces-to-obtain-a-string.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40918908103","text":"dict = {0 : 1}\n\ndef infseq(N, P, Q, X, Y):\n a = N // P - X\n b = N // Q - Y\n if N <= 0:\n return 1\n if N in dict.keys():\n return dict[N]\n if not a in dict.keys():\n dict[a] = infseq(a, P, Q, X, Y)\n if not b in dict.keys():\n dict[b] = infseq(b, P, Q, X, Y)\n dict[N] = dict[a] + dict[b]\n return dict[N]\n\nN, P, Q, X, Y = map(int, input().split())\n\nprint(infseq(N, P, Q, X, Y))\n","repo_name":"junhochoi-dev/algorithm","sub_path":"[PS]BOJ/01354/boj01354.py","file_name":"boj01354.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"4464130515","text":"import wx\r\n\r\n\r\nclass MainPanel(wx.Panel):\r\n\r\n def __init__(self, parent):\r\n wx.Panel.__init__(self, parent=parent)\r\n self.frame = parent\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n hSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n\r\n label = \"85,2 кВт\"\r\n btn = wx.Button(self, label=label)\r\n sizer.Add(btn, 0, wx.ALL, 5)\r\n hSizer.Add((1, 1), 1, wx.EXPAND)\r\n hSizer.Add(sizer, 0, 
wx.TOP, 180)\r\n hSizer.Add((1, 1), 0, wx.ALL, 120)\r\n self.SetSizer(hSizer)\r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n\r\n def OnEraseBackground(self, evt):\r\n dc = evt.GetDC()\r\n if not dc:\r\n dc = wx.ClientDC(self)\r\n rect = self.GetUpdateRegion().GetBox()\r\n dc.SetClippingRect(rect)\r\n\r\n dc.Clear()\r\n bmp = wx.Bitmap(\"kgis2199.jpg\")\r\n dc.DrawBitmap(bmp, 0, 0)\r\n\r\n\r\nclass MainFrame(wx.Frame):\r\n\r\n def __init__(self):\r\n wx.Frame.__init__(self, None, size=(810, 530))\r\n panel = MainPanel(self)\r\n self.Center()\r\n\r\n\r\nclass Main(wx.App):\r\n\r\n def __init__(self, redirect=False, filename=None):\r\n wx.App.__init__(self, redirect, filename)\r\n dlg = MainFrame()\r\n dlg.Show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = Main()\r\n app.MainLoop()","repo_name":"fruitpicker01/UPE","sub_path":"Viz1.py","file_name":"Viz1.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5055026577","text":"\"\"\" users.py handle users data \"\"\"\n\nimport re\nimport os\nimport subprocess\nimport json\nimport tools\n\nIS_VSC = re.compile('^vsc(\\d+)$')\nGET_HOME = re.compile('Directory: ([^\\s]+)')\nGET_VSC_UNIV = re.compile('/user/([^/]+)/')\nIS_ULB = re.compile('/.*ulb.*/')\nIS_VUB = re.compile('/.*vub.*/')\nIS_SCC = re.compile('/.*scc.*/')\nUNKNOWN_USER = re.compile('no such user')\n\n_USERS_CACHE = dict() # Cache processed users\n\ndef account_data(username):\n \"\"\" Will determine user origin (SISC, VSC or CECI) and\n return a dictionnary with user associated data\n\n Args:\n username: username\n\n Returns:\n A dictionnary with information about the user.\n\n User data dictionnary keys with values: 'username', 'home_dir', 'work_dir', 'ssh_key', 'origin'\n User data dictionnary keys with booleans: 'sisc', 'vsc', 'ceci'\n \"\"\"\n\n # Check the cache\n if username in _USERS_CACHE:\n return _USERS_CACHE[username]\n\n # Init user entry\n user = dict()\n user['username'] = username\n user['univ'] = 'unknown'\n user['group'] = 'unknown'\n\n # Collect finger data\n if finger(user) == 0:\n user['origin'] = 'unknown'\n user['group'] = username\n user['univ'] = 'unknown'\n elif IS_VSC.match(username) != None:\n user['origin'] = 'vsc'\n user['group'] = username\n matches = GET_VSC_UNIV.match(user['ldap_home'])\n if matches == None:\n tools.warning('failed to extract university for VSC user ' +\n username + ' from home dir ' + user['ldap_home'])\n else:\n user['univ'] = matches.group(1)\n elif IS_ULB.match(user['home_path']):\n user['univ'] = 'ulb'\n user['origin'] = 'sisc'\n elif IS_VUB.match(user['home_path']):\n user['univ'] = 'vub'\n user['origin'] = 'sisc'\n elif IS_SCC.match(user['home_path']):\n user['univ'] = 'sisc'\n user['origin'] = 'sisc'\n else:\n user['univ'] = 'unknown'\n tools.warning('failed to determine university for user ' + username +\n ' from home dir ' + user['home_path'])\n\n # print \"Added user \", user['username'], ' - ', user['group']\n global _USERS_CACHE\n _USERS_CACHE[username] = user\n\n return user\n\ndef finger(user):\n \"\"\" Execute finger on the passed user entry and return collected data.\n Entry must have ['username'] defined.\n\n Args:\n user: user name\n\n Returns:\n 1 or 0\n\n \"\"\"\n\n user['ldap_home'] = ''\n\n # Must use 2.6 style as still in place on mn05\n # finger_cmd = subprocess.Popen(\n # [\"finger\", user['username']],\n # stdout=subprocess.PIPE)\n # output = finger_cmd.stdout.read()\n output = 
subprocess.check_output(['finger', user['username']], stderr=subprocess.STDOUT)\n output = output.decode('utf-8')\n # print(\"output = \", output.decode('utf-8'))\n if UNKNOWN_USER.search(output) is not None:\n user['home_path'] = ''\n return 0\n\n # Extract from output\n matches = GET_HOME.search(output)\n if matches == None:\n tools.warning('failed to extrat home dir for ' +\n user['username'] + ' in finger output ' + output)\n else:\n user['ldap_home'] = matches.group(1)\n\n if user['ldap_home'] != '':\n user['home_path'] = os.path.realpath(user['ldap_home'])\n else:\n user['home_path'] = ''\n\n # print \"Got \", user['username'], ' - ', user['ldap_home'], ' - ', user['home_path']\n\n return 1\n","repo_name":"lcdrN/ulb","sub_path":"hpc-py/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6151736796","text":"#!/usr/bin/env python\n\nfrom flask import Flask, request, jsonify\nfrom file_storage.MemoryStorage import ContextStoreInMemory, Minifile\nfrom file_storage.CassandraStorage import ContextStoreCassandra\nimport config\n\napp = Flask(__name__)\n\n\ncontext_store_factory = {\n 'memory': lambda: ContextStoreInMemory(),\n 'cassandra': lambda: ContextStoreCassandra(config.MINIFILEBOX_CTX_STORE_CASSANDRA_KEYSPACE,\n config.MINIFILEBOX_CTX_STORE_CASSANDRA_NODES)\n}\n\ncontext_store = context_store_factory[config.MINIFILEBOX_STORAGE_TYPE]()\n\n\n@app.route(config.MINIFILEBOX_BASE_URI + '/context', methods=['POST'])\ndef store_context():\n file = Minifile().from_dict(request.json)\n context_store.save(file)\n return jsonify(status='OK')\n\n\n@app.route(config.MINIFILEBOX_BASE_URI + '/context/', methods=['GET'])\ndef load_context(file_id):\n return jsonify(context_store.load(file_id).to_dict())\n\n\n@app.route(config.MINIFILEBOX_BASE_URI + '/context/', methods=['DELETE'])\ndef delete_context(file_id):\n context_store.delete(file_id)\n return jsonify(status='OK')\n\n\n@app.route(config.MINIFILEBOX_BASE_URI + '/context', methods=['GET'])\ndef list_context():\n return jsonify([c.to_dict() for c in context_store.list()])\n\n\nif __name__ == '__main__':\n # Overriding default HTML exception handler\n # for ex in default_exceptions:\n # app.register_error_handler(ex, handle_error)\n app.run(host='0.0.0.0', port=5002)","repo_name":"sbisogni/minifilebox","sub_path":"components/contextstore_service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71742391369","text":"import sys\nimport heapq\n\ninput = sys.stdin.readline\nN = int(input())\narr = []\nfor i in range(N):\n heapq.heappush(arr, list(map(int, input().split())))\ntoTown, curGas = map(int, input().split())\n\ncount = 0\nmove = []\n\nwhile curGas < toTown:\n while arr and arr[0][0] <= curGas:\n toStation, fillGas = heapq.heappop(arr)\n heapq.heappush(move, [-fillGas, toStation])\n\n if not move:\n count = -1\n break\n \n fg, ts = heapq.heappop(move)\n curGas += -fg\n count += 1\n\nif toTown <= curGas:\n print(count)\nelse:\n print(-1)\n","repo_name":"rbgksqkr/TIL","sub_path":"2021_12/fill_fuel.py","file_name":"fill_fuel.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18815796804","text":"\nimport random\nimport math\n\n\nimport box_helper as helper\n\n\n# 各个数据包装类型\n\n# 
大矩形\nclass Big():\n pos = (0,0) # 位置, 记录自己在父亲中的相对位置\n width = 0 # 宽度\n height = 0 # 高度\n children = [] # 包含的小矩形\n bottomWidth = 0 #底层剩余宽度\n num = 0 # 队列编号\n w = 0 # 数值乘10,方便显示\n h = 0 # 数值乘10, 方便显示\n value = 0 # 总价值\n\n levelWidth = 0 # 加塞时剩余宽度\n currLevel = 0 # 加塞层数\n\n freeSpace = [] # 跳过未使用的空间\n\n\n\n def __init__(self, w=0, h=0):\n self.children = []\n self.setSize(w, h)\n self.bottomWidth = w\n self.num = 0\n\n self.levelWidth = 0\n self.currLevel = 0\n self.currTailPos = None\n\n self.freeSpace = []\n\n def __str__(self):\n return 'c:%d p:%d %d w:%d h:%d' % (len(self.children), self.pos[0], self.pos[1], self.width, self.height)\n\n # width 与 w 已经混用,不能分离了。。。\n def setSize(self, w, h):\n self.width = w\n self.height = h\n self.w = w\n self.h = h\n # 底层加入一个物品\n def bottomPack(self, small):\n small.pos = (self.width - self.bottomWidth, 0)\n small.spacePos = (small.pos[0], small.height)\n # print('small pos:', small.pos)\n self.bottomWidth -= small.width\n\n self.children.append(small)\n small.parent = self\n self.value += small.value\n\n # 清空箱子\n def clear(self):\n self.children = []\n self.bottomWidth = self.width\n\n self.value = 0\n\n # 设置序号,并计算在画布的位置\n def setNum(self, num):\n self.num = num\n x = 30\n y = (self.h + 20)*(num + 1)\n self.pos = (x, y)\n \n # 判断底层还能不能放入\n def bottomCanPack(self, small):\n return self.bottomWidth >= small.width\n\n # 替换一个物品\n def replace(self, index, newSub):\n subs = self.children\n oldSub = subs[index]\n del subs[index]\n oldSubs = subs\n self.clear()\n for sub in oldSubs:\n self.bottomPack(sub)\n self.bottomPack(newSub)\n return oldSub\n\n # children 按高度排序\n def sortByHeight(self):\n helper.sortByHeight(self.children)\n\n children = self.children\n self.clear()\n for subBox in children:\n self.bottomPack(subBox)\n # 在画布上画出自己\n def draw(self, canvas):\n width = self.w\n height = self.h\n x = self.pos[0]\n y = self.pos[1]\n # print(x, y, x+ width, y-height)\n bw = 3 # 边框粗细的一半\n # 画矩形\n canvas.create_rectangle(x-bw, y+bw, x+width+bw, y-height-bw, fill='#121212', outline='blue', stipple='gray12', width=5)\n canvas.create_text(x - 20, y - 10, text=str(self.num))\n\n for child in self.children:\n child.draw(canvas)\n \n # 从上部剩余空间加塞\n def spacePack(self, small):\n p = self.getSpacePos(small)\n small.pos = p\n small.spacePos = (small.pos[0], small.pos[1] + small.height)\n self.levelWidth = self.width - p[0] - small.width\n self.currTailPos = (small.pos[0] + small.width, small.pos[1])\n\n small.level = self.currLevel\n\n self.children.append(small)\n small.parent = self\n self.value += small.value\n # 判断剩余空间能否加塞\n def spaceCanPack(self, small):\n p = self.getSpacePos(small)\n # print(\"get \", p)\n return p\n # 取新物品能放的位置\n def getSpacePos(self, small):\n result = None\n # print('curLevel', self.currLevel)\n\n skipPos = []\n\n # 从下一层的顶部取可放入的位置\n for child in self.children:\n if child.level != self.currLevel - 1:\n continue\n p = child.spacePos\n if self.width - p[0] <= self.levelWidth:\n if self.width - p[0] >= small.width and self.height - p[1] >= small.height:\n result = p\n break\n else:\n skipPos.append(p)\n\n\n # 检查上个物品的结束点\n useTailPos = False # 记录是不是贴着上一个物体\n if self.currTailPos:\n p = self.currTailPos\n if self.width - p[0] >= small.width and self.height - p[1] >= small.height:\n if result is None:\n result = p\n else:\n # 比较一下,取面积大的起始点\n resultArea = (self.width-result[0]) * (self.height - result[1])\n tailArea = (self.width - p[0]) * (self.height - p[1])\n if tailArea > resultArea:\n result = p\n useTailPos = True\n\n # 记录未使用的空间\n if result and not 
useTailPos:\n if self.currTailPos:\n skipPos.insert(0, self.currTailPos)\n skipPos.append(result)\n\n # 有空间的记录下来\n if len(skipPos) > 1:\n self.freeSpace.append(skipPos)\n return result\n\n # 设置加塞高一级\n def spaceLevelUp(self):\n self.currLevel += 1\n self.levelWidth = self.width\n self.currTailPos = None\n\n # 剩余空间加塞\n def freeSpacePack(self, small):\n p = self.getFreeSpacePos(small)\n small.pos = p\n small.spacePos = (small.pos[0], small.pos[1] + small.height)\n\n self.children.append(small)\n small.parent = self\n self.value += small.value\n\n\n # 判断剩余空间是否可加塞\n def freeSpaceCanPack(self, small):\n pos = self.getFreeSpacePos(small)\n return pos\n \n # 获取剩余空间的加塞位置\n def getFreeSpacePos(self, small):\n result = None\n for sIndex in range(len(self.freeSpace)):\n space = self.freeSpace[sIndex]\n getPos = False\n for pIndex in range(len(space) - 1):\n point = space[pIndex]\n end = space[-1]\n width = end[0] - point[0]\n height = self.height - point[1]\n if width >= small.width and height >= small.height:\n result = point\n getPos = True\n del self.freeSpace[sIndex]\n break\n if getPos:\n break\n\n return result\n\n def getDrawPos(self):\n return self.pos\n\n\n\n# 小矩形\nclass Small(Big):\n big = None # 箱子\n parent = None # 小矩形所在的大矩形\n spacePos = (0,0) # 物品顶部位置\n level = 0 # 所在层\n\n def __init__(self, w=0, h=0, big=None):\n super().__init__(w, h)\n self.big = big\n self.calculateValue()\n\n # 计算物品价值\n def calculateValue(self):\n if len(self.children) > 0:\n self.calculateSingleValue()\n # print('for one:', self.value)\n self.value = 0\n for child in self.children:\n self.value += child.calculateValue()\n # print('for mulit:', self.value)\n \n else:\n self.calculateSingleValue()\n return self.value\n # 按本身的宽高计算价值\n def calculateSingleValue(self):\n # print('single:', self.value)\n w = self.width\n h = self.height\n rou = self.calculateRou()\n value = rou * math.pow(w * h, 1.2) * h / w\n self.value = value\n return value\n\n # 计算物品的 ρ 值\n def calculateRou(self):\n w = self.width\n h = self.height\n bw = self.big.width * 0.5\n bh = self.big.height * 0.5\n if (w > bw) and (h > bw):\n return 2\n elif (h > bh and w <= bw) or (w>bh and h<=bw) or (h>bw and w<=bh) or (w>bw and h < bh):\n return 1.5\n else:\n return 1\n\n def __str__(self):\n return 'i:%d p:%d %d w:%d h:%d' % (self.num, self.pos[0], self.pos[1], self.width, self.height)\n\n def draw(self, canvas):\n if len(self.children) == 0:\n self.drawSelf(canvas)\n else:\n # print(\"==============multi box pos \", self.pos)\n for child in self.children:\n child.draw(canvas)\n\n def drawSelf(self, canvas):\n # print('pPos', self.parent.pos)\n # print('pos', self.pos)\n # pPos = self.parent.pos\n drawPos = self.getDrawPos()\n # x = pPos[0] + self.pos[0] \n # y = pPos[1] - self.pos[1] \n x = drawPos[0]\n y = drawPos[1]\n\n color = '#' + ''.join(random.sample('0123456789', 6))\n canvas.create_rectangle(x, y, x+self.w, y-self.h, fill=color)\n # canvas.create_text(x + 10, y - 10, text=str(self.num))\n canvas.create_text(x + 20, y - 10, text=str(self.num) + \" \" + str(self.w))\n # print('draw', x, y, x+self.w, y-self.h)\n\n # 同宽的小盒子合并添加\n def addSub(self, box):\n box.parent = self\n box_y = 0\n for child in self.children:\n box_y += child.height\n box.pos = (0, box_y)\n # append 要放在计算 pos的后面, 要不连自己的高度也加进去了\n self.children.append(box) \n self.setSize(box.width, self.height + box.height)\n self.calculateValue()\n\n def getDrawPos(self):\n pPos = self.parent.getDrawPos()\n return (self.pos[0] + pPos[0], pPos[1] - self.pos[1])\n\n\nclass DataBox():\n bin = None # 
大矩形\n datas = [] # 数据组, 放了10组 Small() 的数据列表, 每组 50 个","repo_name":"XiaoyuJiang17/BSc_2D-Bin_Packing","sub_path":"data_type.py","file_name":"data_type.py","file_ext":"py","file_size_in_byte":9674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70208014729","text":"violator_songs = [\n ['World in My Eyes', 4.86],\n ['Sweetest Perfection', 4.43],\n ['Personal Jesus', 4.56],\n ['Halo', 4.9],\n ['Waiting for the Night', 6.07],\n ['Enjoy the Silence', 4.20],\n ['Policy of Truth', 4.76],\n ['Blue Dress', 4.29],\n ['Clean', 5.83]\n]\n\namount = int(input('Сколько песен выбрать? '))\ntotal_time = 0\n\nfor num in range(amount):\n failure = 0\n print('Название,', str(num + 1) + '-й песни:', end=' ')\n title = input()\n for song in violator_songs:\n if song[0] == title:\n total_time += song[1]\n\nprint('\\nОбщее время звучания песен:', round(total_time, 2), 'минуты')","repo_name":"hummius/Python_basic","sub_path":"Module16/05_songs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34405008729","text":"from flask import Flask, render_template, session, request, redirect\r\n\r\napp = Flask(__name__)\r\napp.secret_key ='secret'\r\nimport random \r\n\r\n\r\n@app.route('/')\r\ndef game():\r\n if 'target' not in session:\r\n session['target'] = random.randint(1,100)\r\n return render_template('index.html')\r\n\r\n@app.route('/guess', methods=['POST'])\r\ndef guess():\r\n if session['target'] == int(request.form['guess']):\r\n session['result'] = 'correct'\r\n\r\n elif session['target'] < int(request.form['guess']):\r\n session['result'] = 'high'\r\n\r\n else:\r\n session['result'] = 'low'\r\n\r\n return redirect('/')\r\n\r\n@app.route('/reset')\r\ndef reset():\r\n session.pop('target')\r\n session.pop('result')\r\n return redirect('/')\r\n\r\napp.run(debug=True)\r\n","repo_name":"py1-10-2017/AlmasM-PY1-10-2017","sub_path":"Python Week 2/gameofnumbers/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5576937692","text":"import pickle\nimport pandas as pd\nimport scipy.sparse as sp\nimport numpy as np\nfrom parameters import *\n\n\ntracks_df= pd.read_csv(FILES_PATH+'tracks.csv')\n\ndef bm25_normalize(mat):\n\tdf_list= np.ravel(mat.sum(axis=1))\n\tpickle.dump(df_list, open(FILES_PATH+'df_list.pkl','wb'))\n\tdoc_lens= np.ravel(mat.sum(axis=0))\n\tdoc_lens_avg= int(np.mean(doc_lens))\n\tprint(doc_lens_avg)\n\tmat.data= np.log2(n_docs/(1+df_list[mat.row]))*mat.data*(k1+1)/(k1*((1-b)+b*(doc_lens[mat.col]/doc_lens_avg))+mat.data)\n\treturn mat\n\ndef get_tvp_matrix():\n\tplaylist_df= pd.read_csv(FILES_PATH+'playlists.csv').iloc[:-5000]\n\tn_rows= len(playlist_df)\n\tn_cols= n_tracks\n\n\trow_vals= []\n\tcol_vals= []\n\n\tfor pid in playlist_df.index:\n\t\tfor tid in playlist_df.tracks[pid][1:-1].split(', '):\n\t\t\trow_vals.append(pid)\n\t\t\tcol_vals.append(int(tid))\n\n\ttvp_matrix= sp.coo_matrix((np.ones(len(row_vals)), (row_vals, col_vals)), shape=(n_rows,n_cols))\n\ttvp_matrix= tvp_matrix.getH()\n\ttvp_matrix= bm25_normalize(tvp_matrix)\n\n\treturn tvp_matrix\n\ndef get_sim_matrix(tvp_matrix):\n\tsim_matrix= tvp_matrix.dot(tvp_matrix.getH())\n\treturn sim_matrix\n\ntvp_matrix= get_tvp_matrix()\nsim_matrix= get_sim_matrix(tvp_matrix)\n#tvp_matrix= 
sp.csc_matrix(tvp_matrix)\n\nsp.save_npz(FILES_PATH+'tvp_matrix.npz', tvp_matrix)\nsp.save_npz(FILES_PATH+'sim_matrix.npz', sim_matrix)\n","repo_name":"Schlommy/COL764","sub_path":"Project/Collaborative Filtering/col_filt.py","file_name":"col_filt.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"15202516174","text":"from itertools import count\nfrom itertools import cycle\nfrom itertools import repeat\nfrom itertools import accumulate\nfrom itertools import dropwhile\nfrom itertools import filterfalse\nfrom itertools import takewhile\nfrom itertools import product\nfrom itertools import combinations\nfrom itertools import permutations\n\n\n\ndef demo_count():\n iterator = count(10)\n print(list(next(iterator) for _ in range(100)))\n\ndef demo_cycle():\n iterator = cycle('ABCD')\n aList = list(next(iterator) for _ in range(len('ABCD')))\n print(aList)\n\ndef demo_repeat():\n iterator = repeat(10, 5)\n aList = list(next(iterator) for _ in range(5))\n print(aList)\n\ndef demo_accumulate():\n iterator = accumulate([1,2,3])\n aList = list(next(iterator) for _ in range(3))\n print(aList)\n\ndef demo_dropwhile():\n originalList = [1,2,4,5,6]\n iterator = dropwhile(lambda x: x%2 > 0, originalList)\n for item in iterator:\n print(item)\n\ndef demo_filterfalse():\n originalList = [1, 2, 4, 5, 6]\n iterator = filterfalse(lambda x: x%2==0, originalList)\n for item in iterator:\n print(item)\n\ndef demo_takewhile():\n originalList = [1, 2, 4, 5, 6]\n iterator = takewhile(lambda x: x % 2 > 0, originalList)\n for item in iterator:\n print(item)\n\ndef demo_product():\n iterator = product('ABCD')\n for item in iterator:\n print(item)\n\ndef demo_product_with_repeat():\n iterator = product('ABCD', repeat=2)\n for item in iterator:\n print(item)\n\ndef demo_permutations():\n iterator = permutations('ABCD')\n for item in iterator:\n print(item)\n\ndef demo_combinations():\n print(\"combinations\")\n iterator = combinations('ABCD', 2)\n for item in iterator:\n print(item)\n\nif __name__ == '__main__':\n demo_count()\n demo_cycle()\n demo_repeat()\n demo_accumulate()\n demo_dropwhile()\n demo_filterfalse()\n demo_takewhile()\n demo_product()\n demo_product_with_repeat()\n demo_permutations()\n demo_combinations()","repo_name":"mbaeumer/python-challenge","sub_path":"block5-datastructures/lists/itertools/itertools_demo.py","file_name":"itertools_demo.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4820541872","text":"\n\"\"\"\nparameter: are variables that we specify inside parentheses at the time of function definition.\nargument: the values that are passed for the parameters when calling the function.\n\"\"\"\n\n\ndef summation(a, b):\n result = a + b\n print(result)\n\n\ndef subtraction():\n c = 100\n d = 20\n result = c - d\n print(result)\n\n\nsummation(100, 200)\nsummation(20, 20)\nsubtraction()\n\n\n","repo_name":"Muntasir101/PythonBasic16","sub_path":"8_Functions/function_demo3.py","file_name":"function_demo3.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40018834194","text":"import pandas as pd\nimport os\n\nhome_file_name = r\"/home/jambrown/CP_Analysis/\"\n\ncp_downloads_zipped_file_name = r\"/home/jambrown/CP_Downloads/\"\nlist_of_zipped_cp_files = 
os.listdir(cp_downloads_zipped_file_name)\nlist_of_zipped_cp_files.sort(reverse=True) # Ensures most recent scans are processed first\n\n# Get label headers\ndf_0 = pd.read_parquet(path='/home/jambrown/CP_Analysis/CP_Satellite-2022-02-09-12-00-01/other_docs/max_asn.gzip', engine='pyarrow')\nmaster_df = pd.DataFrame.from_records(data=df_0, nrows=1)\nmaster_df.drop(labels=master_df.index[0:], inplace=True)\n\nfor folder_name in list_of_zipped_cp_files:\n\n # Load dataframe\n cp_scan_only_name = folder_name.split('.')[0]\n scan_base_file_name = home_file_name + cp_scan_only_name + r\"/\"\n asn_count_file_name = scan_base_file_name + \"other_docs/max_asn.gzip\"\n partial_df = pd.read_parquet(path=asn_count_file_name, engine='pyarrow')\n\n # Merge with master dataframe\n master_df = pd.concat([master_df, partial_df], ignore_index=True)\n\n# Relabel Records\nmaster_df.reset_index(drop=True, inplace=True)\n\nclean_records = []\n\nfor row_index in range(0, master_df.shape[0]):\n\n if master_df['most_common_asn_num'].iloc[row_index] == 0 or master_df['most_common_asn_count'].iloc[row_index] < 10000 or master_df['percent_of_asns'].iloc[row_index] < 0.5:\n\n clean_records.append(\"DIRTY\")\n\n else:\n\n clean_records.append(\"CLEAN\")\n\nmaster_df.insert(loc=master_df.shape[1], column=\"clean_records\", value=clean_records)\n\n# Save file\n# fastparquet must be used so that the categorical variables associated with integers (ex rcode) will also deserialize into categorical variables\nmaster_df.to_parquet(index=True, compression=\"gzip\", engine='pyarrow', path=home_file_name + \"max_asn_aggregate.gzip\")","repo_name":"theWazinator/AnomalyDetectionMiniScript","sub_path":"Aggregate_AS_Count.py","file_name":"Aggregate_AS_Count.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28257629644","text":"import tempfile\nimport zipfile\nimport re\nimport os\nimport xmltodict\nimport pathlib\n\n\ndef read_file_from_zip(download_uri: str, filename: str):\n with tempfile.TemporaryDirectory() as path:\n with zipfile.ZipFile(download_uri, 'r') as zip_ref:\n zip_ref.extractall(path=path)\n\n for root, _, files in os.walk(path):\n for file in files:\n if re.search(filename, file):\n with open(os.path.join(root, file), encoding='utf8') as f:\n content = xmltodict.parse(f.read())\n return content\n return {}\n\n\ndef unzip_xbrl(dest_directory: str, download_uri: str):\n with zipfile.ZipFile(download_uri, 'r') as zip_ref:\n zip_ref.extractall(path=dest_directory)\n for root, _, files in os.walk(dest_directory):\n for file in files:\n if pathlib.Path(os.path.join(root, file)).suffix == '.xbrl':\n return os.path.join(root, file)\n return ''\n","repo_name":"krosstrading/krxmarket","sub_path":"krxmarket/common/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2582053088","text":"import os\nimport shutil\nimport subprocess\nimport sys\nfrom load_settings import load_settings\n\ndef build_project(android_project_path, is_debug):\n if os.name == 'nt':\n gradle_executable = \"gradlew.bat\"\n else:\n gradle_executable = \"./gradlew\"\n\n os.chdir(android_project_path)\n subprocess.run([gradle_executable, \"clean\"])\n build_type = \"assembleDebug\" if is_debug else \"assembleRelease\"\n subprocess.run([gradle_executable, build_type])\n os.chdir(\"..\")\n\ndef copy_aar(android_project_path, 
unity_project_path, is_debug):\n # Define source and destination folders\n source_folder = os.path.join(android_project_path, \"app\", \"build\", \"outputs\", \"aar\")\n destination_folder = os.path.join(unity_project_path, \"Assets\", \"MirageWidget\", \"Plugins\", \"MirageWebViewPlugin\", \"Android\")\n\n # Check if the source folder exists\n if os.path.exists(source_folder):\n # Find the app-release.aar file\n file_name = \"app-debug.aar\" if is_debug else \"app-release.aar\"\n file_copied = False\n \n for file in os.listdir(source_folder):\n if file.endswith(file_name):\n source_file = os.path.join(source_folder, file)\n destination_file = os.path.join(destination_folder, \"mirage-webview.aar\")\n\n # Copy and rename the file\n shutil.copyfile(source_file, destination_file)\n print(f\"Copied and renamed {source_file} to {destination_file}\")\n file_copied = True\n break\n\n if not file_copied:\n print(f\"built plugin file {file_name} not found in {source_folder}\")\n else:\n print(f\"Source folder {source_folder} does not exist.\")\n\nif __name__ == \"__main__\":\n settings = load_settings()\n android_project_path = os.path.abspath(settings['AndroidProjectPath'])\n unity_project_path = os.path.abspath(settings['UnityProjectPath'])\n \n # If debug folder exists in the plugin sources in this case we build debug\n is_debug = os.path.exists(os.path.join(android_project_path, \"app\", \"src\", \"debug\"))\n\n build_project(android_project_path, is_debug)\n copy_aar(android_project_path, unity_project_path, is_debug)\n","repo_name":"Ankr-network/mirage-widget-unity","sub_path":"BuildScripts/build_and_copy_aar.py","file_name":"build_and_copy_aar.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71632490889","text":"resposta = 'S'\nsoma = quantidade = média = maior = menor = 0\nwhile resposta in 'Ss':\n número = int(input('Digite um número: '))\n soma += número\n quantidade += 1\n if quantidade == 1:\n maior = número\n menor = número\n else:\n if número > maior:\n maior = número\n elif número < menor:\n menor = número\n\n resposta = str(input('Quer continuar? 
[S/N] ')).upper().strip()[0]\nmédia = soma / quantidade\nprint('Ao todo foram {} valores informados e a média entre eles foi {:.2f}'.format(quantidade, média))\nprint('O maior valor informado foi {} e o menor foi {}'.format(maior, menor))\n\nprint('Fim do programa!')","repo_name":"lucasrenandns/Python-3","sub_path":"curso python/exercicios/mundo-2/ex063/maioremenorvalores.py","file_name":"maioremenorvalores.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13323395155","text":"from time import time\nfrom fractions import Fraction\n\nt1 = time()\nL = [Fraction(3 / 2)]\n\n\ndef approx(x):\n if x == 0:\n return Fraction(3, 2)\n else:\n L.append(1 + Fraction(1, (1 + L[x - 1])))\n return 1 + Fraction(1, (1 + L[x - 1]))\n\n\nans = 0\nfor i in range(1000):\n x = approx(i)\n if len(str(x.numerator)) > len(str(x.denominator)):\n ans += 1\nprint(ans)\nprint(f\"Process completed in {time()-t1}s\")\n","repo_name":"PraneethJain/Project-Euler","sub_path":"Solutions/Problem_057.py","file_name":"Problem_057.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34154050378","text":"#!/usr/bin/env python\nimport re,sys\nfrom itertools import zip_longest\nfrom subprocess import run\n\nassert len(sys.argv) > 1\n\n# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\ndef grouper(iterable, n, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\ndef group_command(flatten_commands, commands):\n \"Group string by it's command name\"\n separator = f\"({'|'.join(list_command)})\"\n separated_cmd_params = [s.strip() for s in re.split(separator, flatten_commands)[1:]]\n return [' '.join(s) for s in grouper(separated_cmd_params, 2)]\n\ncargo_list = run(['cargo', '--list'], capture_output=True)\nstdout = cargo_list.stdout.decode('utf-8')\n\ncommand_help = stdout.split('\\n')[1:-1] # remove endline and title string\nlist_command =[s.split(maxsplit=1)[0] for s in command_help] # remove help description\n\ngrouped = group_command(' '.join(sys.argv[1:]), list_command)\n\nprefix_command = lambda f: '\"{prefix} {}\"'.format(f'\" \"{f} '.join(grouped), prefix=f)\nprint(prefix_command('cargo'))\n","repo_name":"DrSensor/scdlang","sub_path":".github/action/perf/wrap-args.py","file_name":"wrap-args.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"16"} +{"seq_id":"10741108139","text":"import argparse\nimport json\nimport yaml\nfrom kafka import KafkaConsumer, TopicPartition\nfrom datetime import datetime\n\ndef consume_messages(config, topics, show_options, output, output_file_pattern, output_type, offset, timestamp, group,\n timestamp_format):\n # Load Kafka configuration from JSON or YAML file\n if config.endswith('.json'):\n with open(config) as file:\n kafka_config = json.load(file)\n elif config.endswith('.yaml') or config.endswith('.yml'):\n with open(config) as file:\n kafka_config = yaml.safe_load(file)\n else:\n # Assume config is a space-separated key-value string\n kafka_config = dict(item.split('=') for item in config.split())\n\n # Create Kafka consumer\n consumer = KafkaConsumer(bootstrap_servers=kafka_config['bootstrap_servers'],\n security_protocol=kafka_config['security_protocol'],\n ssl_cafile=kafka_config['ssl_cafile'],\n 
ssl_certfile=kafka_config['ssl_certfile'],\n                             ssl_keyfile=kafka_config['ssl_keyfile'],\n                             ssl_check_hostname=kafka_config.get('ssl_check_hostname', True),\n                             group_id=group)\n\n    partitions = []\n    for topic in topics:\n        if ':' in topic:\n            topic_name, partition_num = topic.split(':')\n            partition = int(partition_num)\n            partitions.append(TopicPartition(topic=topic_name, partition=partition))\n        else:\n            # assign() only accepts TopicPartition objects, so a bare topic\n            # name is expanded into all of its partitions\n            for partition_num in consumer.partitions_for_topic(topic) or []:\n                partitions.append(TopicPartition(topic=topic, partition=partition_num))\n\n    consumer.assign(partitions)\n\n    if offset:\n        # Seek to the specified offset for each partition\n        for partition in partitions:\n            consumer.seek(partition, offset)\n    elif timestamp:\n        # Convert timestamp to milliseconds\n        timestamp_ms = int(timestamp * 1000)\n        # seek() takes an offset, so translate the timestamp into the first\n        # offset at or after it for each partition\n        offsets = consumer.offsets_for_times({p: timestamp_ms for p in partitions})\n        for partition, offset_ts in offsets.items():\n            if offset_ts is not None:\n                consumer.seek(partition, offset_ts.offset)\n\n    # Iterate over messages and extract desired information\n    messages = []\n    for message in consumer:\n        data = {}\n        if 'topic' in show_options:\n            data['topic'] = message.topic\n        if 'partition' in show_options:\n            data['partition'] = message.partition\n        if 'offset' in show_options:\n            data['offset'] = message.offset\n        if 'timestamp' in show_options:\n            data['timestamp'] = datetime.fromtimestamp(message.timestamp / 1000).strftime(timestamp_format)\n        if 'key' in show_options:\n            data['key'] = message.key.decode('utf-8')\n        if 'header' in show_options:\n            data['header'] = dict(message.headers)\n        data['message'] = message.value.decode('utf-8')\n        messages.append(data)\n\n        # Write message to output file if specified\n        if output:\n            timestamp_parts = {\n                'Y': '%Y',\n                'm': '%m',\n                'M': '%M',\n                'd': '%d',\n                'H': '%H',\n                'S': '%S',\n                'MS': '{:03d}'.format(int(datetime.fromtimestamp(message.timestamp / 1000).strftime('%f')[:3]))\n            }\n\n            file_name = output_file_pattern\n            for part, format_str in timestamp_parts.items():\n                file_name = file_name.replace('{{' + part + '}}', format_str)\n\n            file_name = file_name.replace('{{topic}}', message.topic)\\\n                .replace('{{partition}}', str(message.partition))\\\n                .replace('{{offset}}', str(message.offset))\\\n                .replace('{{extension}}', output_type)\n\n            file_path = f\"{output}/{file_name}\"\n            with open(file_path, 'a') as file:\n                if output_type == 'json':\n                    file.write(f\"{json.dumps(data)}\\n\")\n                elif output_type in ['yaml', 'yml']:\n                    file.write(f\"{yaml.dump(data)}\\n\")\n                elif output_type == 'xml':\n                    xml_data = convert_to_xml(data)\n                    file.write(f\"{xml_data}\\n\")\n                elif output_type == 'csv':\n                    csv_data = convert_to_csv(data)\n                    file.write(f\"{csv_data}\\n\")\n                elif output_type == 'tsv':\n                    tsv_data = convert_to_tsv(data)\n                    file.write(f\"{tsv_data}\\n\")\n                elif output_type == 'ssv':\n                    ssv_data = convert_to_ssv(data)\n                    file.write(f\"{ssv_data}\\n\")\n                else:\n                    raise ValueError(f\"Unsupported output type: {output_type}\")\n\n    return messages\n\ndef convert_to_xml(data):\n    # Convert data dictionary to an XML string wrapped in a <message> element\n    xml_data = \"<message>\\n\"\n    for key, value in data.items():\n        if key == 'header':\n            xml_data += \"  <header>\\n\"\n            for h_key, h_value in value.items():\n                xml_data += f\"    <{h_key}>{h_value}</{h_key}>\\n\"\n            xml_data += \"  </header>\\n\"\n        else:\n            xml_data += f\"  <{key}>{value}</{key}>\\n\"\n    xml_data += \"</message>
\"\n return xml_data\n\ndef convert_to_csv(data):\n # Convert data dictionary to CSV format\n csv_data = ''\n for key, value in data.items():\n if key == 'header':\n for h_key, h_value in value.items():\n csv_data += f\"{h_key},{h_value}\\n\"\n else:\n csv_data += f\"{key},{value}\\n\"\n return csv_data\n\ndef convert_to_tsv(data):\n # Convert data dictionary to TSV format\n tsv_data = ''\n for key, value in data.items():\n if key == 'header':\n for h_key, h_value in value.items():\n tsv_data += f\"{h_key}\\t{h_value}\\n\"\n else:\n tsv_data += f\"{key}\\t{value}\\n\"\n return tsv_data\n\ndef convert_to_ssv(data):\n # Convert data dictionary to SSV format\n ssv_data = ''\n for key, value in data.items():\n if key == 'header':\n for h_key, h_value in value.items():\n ssv_data += f\"{h_key} {h_value}\\n\"\n else:\n ssv_data += f\"{key} {value}\\n\"\n return ssv_data\n\ndef main():\n parser = argparse.ArgumentParser(description='Kafka message consumer')\n parser.add_argument('--config', required=True, help='Kafka connection and security configuration')\n parser.add_argument('--topics', required=True, help='Kafka topics to consume from')\n parser.add_argument('--show', help='Comma-separated list of options to show (key, header, partition, topic, offset, timestamp)')\n parser.add_argument('--output', help='Output folder for messages')\n parser.add_argument('--outputFilePattern', default='{{topic}}-{{partition}}-{{offset}}.{{extension}}',\n help='Output file name pattern using mustache syntax')\n parser.add_argument('--type', default='json',\n choices=['text', 'json', 'yaml', 'yml', 'xml', 'csv', 'tsv', 'ssv'],\n help='Output type')\n parser.add_argument('--offset', type=int, help='Start consuming from the specified offset')\n parser.add_argument('--timestamp', type=float, help='Start consuming from the specified timestamp')\n parser.add_argument('--group', help='Consumer group name')\n parser.add_argument('--timestampFormat', default='%Y-%m-%d %H:%M:%S.{{MS}}',\n help='Timestamp format for output file name using strftime syntax')\n args = parser.parse_args()\n\n show_options = []\n if args.show:\n show_options = args.show.split(',')\n\n messages = consume_messages(args.config, args.topics.split(','), show_options, args.output,\n args.outputFilePattern, args.type, args.offset, args.timestamp, args.group,\n args.timestampFormat)\n\n if args.type == 'json':\n print(json.dumps(messages))\n elif args.type in ['yaml', 'yml']:\n print(yaml.dump_all(messages, default_flow_style=False))\n else:\n print(\"Unsupported output type\")\n\nif __name__ == '__main__':\n main()","repo_name":"armoin2018/kafka-tools","sub_path":"kafka-consumer/kafka-consumer.py","file_name":"kafka-consumer.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12472241585","text":"import math\r\n\r\nR, X, Y = map(int, input().split())\r\n\r\nZ = math.sqrt(X**2 + Y**2)\r\n\r\nans = 0\r\n\r\nwhile True:\r\n if R == Z:\r\n ans += 1\r\n break\r\n \r\n if 2*R < Z:\r\n ans += 1\r\n Z -= R\r\n else:\r\n ans += 2\r\n break\r\n\r\nprint(ans)","repo_name":"sbmtrntr/AtCoder","sub_path":"ABC/150~199/198/C1.py","file_name":"C1.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4335231000","text":"from timbral_models import timbral_roughness\n\n# generic file location\n\narq = 
open('/home/douglas/Música/musicas/wav/tristes/tristes.txt','r')\nlines = arq.readlines()\narq.close()\n\nfor l in lines:\n    fname = '/home/douglas/Documentos/tcc_code/musicas/wav/felizes_30'+l.strip()\n    # calculate roughness\n    roughness = timbral_roughness(fname) \n    print(roughness)","repo_name":"dodo1210/tcc_code","sub_path":"elementos/roughness/executa.py","file_name":"executa.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"30989507662","text":"#this program produces a list out of a function that takes a list as argument\n\ndef commacode(insertList):\n    if (len(insertList)==0):\n        print(\"Nothing in the list\")\n    else:\n        # join all but the last item with commas, then attach the last item with \"and\"\n        buffer = \", \".join(insertList[:-1])\n        print(buffer + \" and \" + insertList[-1])\n\n\nspam = [\"cat\",\"rat\",\"mouse\",\"elephant\", \"eagle\"]\nendOfList = spam[-1]\nnewEndOfList = \" and \" + endOfList\nnewLine = \", \".join(spam[:-1])\nprint(newEndOfList)\nprint(newLine + newEndOfList)\n","repo_name":"Sinsnickers/Automatetheboringstuff","sub_path":"Chapter4/PracticeProjects/CommaCode.py","file_name":"CommaCode.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14872604003","text":"import pandas as pd\nimport pathlib\nimport imageio\nimport numpy as np\nimport skimage\n\nfrom utils.imaging import get_path, get_image_ids\n\nfrom tqdm import tqdm\nfrom scipy import ndimage\nfrom skimage.color import rgb2gray\nfrom skimage.filters import threshold_otsu\n\ndef rle_encoding(x):\n    '''\n    Performs run length encoding on an array\n\n    Args:\n        x (ndarray): numpy array of shape (height, width), 1 - mask, 0 - background\n    Returns:\n        (list): run length as list\n    '''\n    dots = np.where(x.T.flatten()==1)[0]\n    run_lengths = []\n    prev = -2\n    for b in dots:\n        if (b > prev+1): run_lengths.extend((b+1, 0))\n        run_lengths[-1] += 1\n        prev = b\n    return \" \".join([str(i) for i in run_lengths])\n\n\ndef rle_image(labels_image, image_id):\n    '''\n    Take a labelled image and image id then perform rle and return a pandas dataframe\n\n    Args:\n        labels_image (ndarray): a sequentially labelled image\n    Return:\n        df_image (dataframe): data frame of ImageId, Encoding\n    '''\n    num_labels = np.amax(labels_image)\n    df_image = pd.DataFrame(columns=['ImageId','EncodedPixels'])\n    for label_num in range(1, num_labels+1):\n        label_mask = np.where(labels_image == label_num, 1, 0)\n        if label_mask.flatten().sum() > 10:\n            rle = rle_encoding(label_mask)\n            rle_series = pd.Series({'ImageId': image_id[:-4], 'EncodedPixels': rle})\n            df_image = df_image.append(rle_series, ignore_index=True)\n    return df_image\n\n\ndef rle_images_in_dir(image_type='test', stage_num = 1):\n    '''\n    Performs rle on all labelled images in a directory\n\n    Arguments:\n        image_type (str): training or test data\n        stage_num (int): stage number of the data\n\n    '''\n    stage_num = str(stage_num)\n    input_path = get_path('output_' + image_type + '_' + stage_num + '_lab_seg')\n    image_ids = get_image_ids(input_path)\n    output_path = get_path('output_' + image_type + '_' + stage_num)\n\n    df_all = pd.DataFrame()\n    for idx, image_id in tqdm(enumerate(image_ids), total=len(image_ids)):\n        image_dir = input_path + image_id\n        image = skimage.io.imread(image_dir)\n        df_image = rle_image(image, image_id)\n        df_all = df_all.append(df_image, ignore_index=True)\n        #print('encoded image %d of %d, image: 
%s \\n' % \\\n        #      (idx + 1, len(image_ids), image_id[:-4]))\n    #df_all.to_csv(output_path + 'rle_submission.csv', index=None)\n    return df_all\n\nif __name__ == '__main__':\n    df = rle_images_in_dir(image_type = 'test', stage_num = 1)\n","repo_name":"williamgrimes/nuclei_segmentation","sub_path":"utils/run_length_encoding.py","file_name":"run_length_encoding.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40014153689","text":"import numpy as np\r\n\r\nN = int(input())\r\nwv = np.array([list(map(int, input().split())) for i in range(0, N)])\r\nW = int(input())\r\n\r\n# DP table\r\ndp = np.zeros((N+1, W+1), dtype=int)\r\n\r\ndef solve():\r\n    for n in range(N - 1, -1, -1):\r\n        for w in range(0, W + 1):\r\n            if w < wv[n, 0]:\r\n                dp[n, w] = dp[n + 1, w]\r\n            else:\r\n                dp[n, w] = max(dp[n+1, w], dp[n+1, w - wv[n, 0]] + wv[n, 1])\r\n\r\n    print(dp[0, W])\r\n\r\ndef main():\r\n    solve()\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"koralle/Python","sub_path":"ant_programming/napzack_03.py","file_name":"napzack_03.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36842845252","text":"portal = context.getPortalObject()\ntemplate_tool = portal.portal_templates\n\ntemplate_tool.updateRepositoryBusinessTemplateList(\n  template_tool.getRepositoryList())\n\nmethod_kw = {'bt5_list': ['erp5_core'],\n             'deprecated_after_script_dict': None,\n             'deprecated_reinstall_set': None,\n             'dry_run': False,\n             'delete_orphaned': False,\n             'keep_bt5_id_set': [],\n             'update_catalog': False}\n\n\ntemplate_tool.upgradeSite(**method_kw)\n\nif skip_upgrader:\n  return \"Skipped to upgrade slapos_upgrader\"\n\n# Use activities to ensure that it is done in another transaction\ntag = \"upgrader:ERP5Site_upgradeUpgraderBusinessTemplate\"\ntemplate_tool.activate(\n  tag=tag).ERP5Site_upgradeUpgraderBusinessTemplate()\n\nif skip_alarm:\n  return \"Skipped to call portal_alarms.promise_check_upgrade.activeSense\"\nportal.portal_alarms.promise_check_upgrade.activate(\n  tag=\"upgrader:promise_check_upgrade\", after_tag=tag).activeSense()\n","repo_name":"SlapOS/slapos.core","sub_path":"master/bt5/slapos_upgrader/SkinTemplateItem/portal_skins/slapos_upgrader/ERP5Site_upgradeERP5CoreBusinessTemplate.py","file_name":"ERP5Site_upgradeERP5CoreBusinessTemplate.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"71100495367","text":"f = open('test.bin', 'wb+')\n\nf.write(b'12,34,53,25,61,28,78\\n')\n\nstr1 = '中文\\n测试\\n'\nbytes1 = bytes(str1, encoding='gbk')\nf.write(bytes1)\n\nf.seek(0)\nbytes2 = f.read()\nstr2 = bytes2.decode('gbk', 'ignore')\nprint(str2)\n\nf.close()\n\n","repo_name":"douzujun/Python-Foundation-Suda","sub_path":"上机题目和面试题整理/Python-Foundation-Suda-master/01_董付国Python书题/07_FILE/binfile.py","file_name":"binfile.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"da","doc_type":"code","stars":51,"dataset":"github-code","pt":"16"} +{"seq_id":"38285072801","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\nhttps://arxiv.org/abs/1507.06527\nhttps://arxiv.org/pdf/1507.06527.pdf\n\n\"\"\"\nimport gym\nimport ptan\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom lib import utils, atari_wrappers, data\nfrom tensorboardX import 
SummaryWriter\n\nALGORITHM = \"DQRNN\"\n\n\nclass DRQNet(nn.Module):\n def __init__(self, shape, actions, conv_features=64, num_layers=4, hidden_size=128, device='cuda'):\n super(DRQNet, self).__init__()\n\n self.shape = shape\n self.actions = actions\n self.conv_features = conv_features\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.device = device\n\n self.conv = nn.Sequential(nn.Conv2d(shape[0], 32, kernel_size=8, stride=4, bias=False),\n nn.ReLU(),\n nn.Conv2d(32, conv_features, kernel_size=4, stride=2, bias=False),\n nn.ReLU(),\n nn.Conv2d(conv_features, conv_features,\n kernel_size=3, stride=1, bias=False),\n nn.ReLU(),\n nn.AdaptiveMaxPool2d((conv_features, 1))\n )\n\n self.gru_input = self._gru_input_size()\n\n self.gru = nn.GRU(self.gru_input, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)\n self.act = nn.ReLU()\n\n fc_input_size = hidden_size * conv_features\n\n self.fc = nn.Sequential(nn.Flatten(1),\n nn.Linear(fc_input_size, 512),\n nn.ReLU(),\n nn.Linear(512, actions))\n self.to(device)\n\n def forward(self, x, hidden):\n \"\"\"\n GRU <- (batch, h, w)\n h0: (layers, batch, hidden_size)\n \"\"\"\n fx = x.float()/255\n out = self.conv(fx)\n # gru_input: (batch, h, w), h0: (layers, batch, hidden_size)\n self.gru.flatten_parameters()\n out, new_hidden = self.gru(out[:, :, :, -1], hidden)\n out = self.fc(self.act(out))\n return out, new_hidden\n\n def _gru_input_size(self):\n o = torch.zeros(1, *self.shape)\n return np.product(self.conv(o).detach().numpy().shape[-2:])\n\n def init_hidden(self, batch_size):\n return torch.zeros(self.num_layers, batch_size, self.hidden_size).to(self.device)\n\n\ndef unpack_batch(batch):\n \"\"\"Unpack standard ptan experience first-last batch.\"\"\"\n states, actions, rewards, dones, last_states = [], [], [], [], []\n for exp in batch:\n state = np.array(exp.state, copy=False)\n states.append(state)\n actions.append(exp.action)\n rewards.append(exp.reward)\n dones.append(exp.last_state is None)\n if exp.last_state is None:\n last_states.append(state) # the result will be masked anyway\n else:\n last_states.append(np.array(exp.last_state, copy=False))\n\n return np.array(states, copy=False), np.array(actions), np.array(rewards, dtype=np.float32), \\\n np.array(dones), np.array(last_states, copy=False)\n\n\ndef calc_loss_dqn(batch, net, tgt_net, gamma, device=\"cpu\"):\n \"\"\"Calculate DeepQ Loss \"\"\"\n states, actions, rewards, dones, next_states = unpack_batch(batch)\n states_v = torch.tensor(states).to(device)\n actions_v = torch.tensor(actions).to(device)\n rewards_v = torch.tensor(rewards).to(device)\n done_mask = torch.tensor(dones).to(device)\n state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)\n with torch.no_grad():\n next_states_v = torch.tensor(next_states).to(device)\n next_state_values = tgt_net(next_states_v).max(1)[0]\n next_state_values[done_mask] = 0.0\n expected_state_action_values = next_state_values.detach() * gamma + rewards_v\n return torch.nn.functional.mse_loss(state_action_values, expected_state_action_values)\n\n\nif __name__ == '__main__':\n message = '*** Using Deep Recurrent Q-Learning Algorithm *** '\n args = utils.argpars_dqn(message)\n params = data.params[args.env]\n utils.update_params(params, args)\n\n device = 'cuda' if args.cuda else 'cpu'\n # 'RiverraidNoFrameskip-v4' CentipedeNoFrameskip-v4 'PongNoFrameskip-v4'\n\n envs = []\n for _ in range(params.n_envs):\n env = gym.make(params.env)\n env = atari_wrappers.wrap_dqn_light(env, 
stack_frames=params.frame_stack,\n                                            skip=args.skip, max_episode_steps=args.max)\n        env.seed(params.seed)\n        envs.append(env)\n\n    shape = env.observation_space.shape\n    actions = env.action_space.n\n\n    net = DRQNet(shape, actions, hidden_size=128, device=device)\n    tgt_net = ptan.agent.TargetNet(net)\n\n    # epsilon-greedy selection, so that EpsilonTracker's annealing of selector.epsilon actually drives exploration\n    selector = ptan.actions.EpsilonGreedyActionSelector()\n\n    agent = ptan.agent.DQNAgent(lambda x: net(x, None)[0], selector, device=device)\n    eps_tracker = ptan.actions.EpsilonTracker(selector, params.eps_start, params.eps_final, params.eps_frames)\n\n    exp_src = ptan.experience.ExperienceSourceFirstLast(envs, agent, gamma=params.gamma, steps_count=params.steps)\n    buffer = ptan.experience.ExperienceReplayBuffer(exp_src, params.buffer_size)\n\n    mean_monitor = utils.MeanRewardsMonitor(env, net, ALGORITHM, params.solve_rewards)\n\n    writer = SummaryWriter(logdir=mean_monitor.runs_dir,\n                           comment=str(params.n_envs))\n    writer.add_text(ALGORITHM + ' HParams', str(vars(params)))\n    writer.add_text('Number of Trainable Parameters', str(utils.count_parameters(net)))\n\n    optimizer = torch.optim.Adam(net.parameters(), lr=params.lr,)\n\n    print(net)\n    print('*'*10, ' Start Training ',\n          env.game, ' {} '.format(device), '*'*10)\n\n    frame = 0\n    episode = 0\n    with ptan.common.utils.RewardTracker(writer) as tracker:\n        while True:\n            frame += params.n_envs\n            eps_tracker.frame(frame)\n            buffer.populate(params.n_envs)\n            reward = exp_src.pop_total_rewards()\n            if reward:\n                episode += 1\n                mean = tracker.reward(\n                    reward[0], frame, epsilon=selector.epsilon)\n                if mean_monitor(mean):\n                    break\n\n            if len(buffer) < params.init_replay:\n                continue\n\n            optimizer.zero_grad()\n            batch = buffer.sample(params.batch_size)\n            loss_v = calc_loss_dqn(batch,\n                                   lambda x: net(x, net.init_hidden(params.batch_size))[0],\n                                   lambda x: tgt_net.target_model(x, net.init_hidden(params.batch_size))[0],\n                                   params.gamma**params.steps, device=device)\n            loss_v.backward()\n            optimizer.step()\n\n            if frame % params.sync_nets == 0:\n                tgt_net.sync()\n","repo_name":"ayjabri/refresh","sub_path":"10_dqrnn.py","file_name":"10_dqrnn.py","file_ext":"py","file_size_in_byte":6919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41715454760","text":"import numpy as np\nimport math\nimport operator\nimport sys\n\nimport src.Distributions as Distributions\n\n\n# global var for distributions\n\nclass Bayes:\n\tdef __init__(self, isNaive, distribution=[]):\n\t\t# last column should be Y\n\t\tself.priors = []\n\t\tself.isNaive = isNaive\n\t\tself.distribution = distribution\n\t\tself.parameters = []\n\t\tself.posterior_probas = []\n\n\n\t# algorithm\n\tdef train(self,data):\n\t\t# assume a distribution for the class-conditional densities - for now assume Gaussian, which is the default\n\t\t# calculate MLE\n\t\tself.calculate_priors(data)\n\t\tif not self.isNaive:\n\t\t\t# Multivariate Gaussian; parameter is of size 1; contains u and sigma dicts\n\t\t\tself.ml_estimate(data)\n\t\t\treturn\n\t\tif len(self.distribution) != data.shape[1] - 1:\n\t\t\tprint(\"length of distribution array does not match number of features = \"+str(data.shape[1]-1))\n\t\t\texit()\n\t\tself.ml_estimate(data)\n\n\n\t# calculates prior of each class\n\tdef calculate_priors(self,data):\n\t\tN = data.shape[0]\n\t\tpriors = {}\n\t\t# counting the number of instances for each class\n\t\tfor i in range(data.shape[0]):\n\t\t\telem = data[i]\n\t\t\tq = elem[len(elem)-1]\n\t\t\tif q in priors:\n\t\t\t\tpriors[q] += 1\n\t\t\telse:\n\t\t\t\tpriors[q] = 1\n\n\t\tfor key in priors:\n\t\t\tpriors[key] = 
(priors[key]*1.0)/N\n\t\tself.priors = priors\n\n\n\t# function to predict labels - to be called from main\n\tdef fit(self, test_X):\n\t\t# multiply likelihood to priors\n\t\t# print(len(self.parameters))\n\t\t# print(self.parameters[0])\n\t\t# print(self.parameters[1])\n\t\t# print(self.parameters[2])\n\t\t# print(self.parameters[3])\n\t\t# print(self.parameters[4])\n\t\t# print(self.parameters[5])\n\t\tpredicted_class = []\n\t\tprobas = []\n\t\tfor x in test_X:\n\t\t\t# x = x[:-1]\n\t\t\tposteriors = {}\n\t\t\tfor c in self.priors:\n\t\t\t\t# c = int(c)\n\t\t\t\tif not self.isNaive:\n\t\t\t\t\tlikelihood = Distributions.gaussian_multivar(x[:-1], self.parameters[0][0][c], self.parameters[0][1][c])\n\t\t\t\telse:\n\t\t\t\t\tlikelihood = 1\n\t\t\t\t\tfor i in range(len(self.distribution)):\n\t\t\t\t\t\tdi = self.distribution[i]\n\t\t\t\t\t\tif di == 0:\n\t\t\t\t\t\t\tlikelihood = likelihood*Distributions.gaussian(x[i],self.parameters[i][0][c][0],self.parameters[i][1][c][0][0]) \n\t\t\t\t\t\telif di == 1:\n\t\t\t\t\t\t\tlikelihood = likelihood*self.parameters[i][c][x[i]]\n\t\t\t\t\t\telif di == -1:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\tposteriors[c] = likelihood*self.priors[c]\n\n\t\t\tbest_class = max(posteriors.items(), key=operator.itemgetter(1))[0]\n\t\t\tpredicted_class.append(best_class)\n\t\t\tprobas.append(posteriors)\n\t\tself.posterior_probas = probas\t\t\n\t\treturn predicted_class\n\t\n\tdef get_probas(self):\n\t\tif len(self.posterior_probas)==0:\n\t\t\tprint(\"WARNING: pass training data to predict first\")\n\t\treturn self.posterior_probas\n\n\t\n\tdef ml_estimate(self,data):\n\t\t# Returns list of parameters for each distribution type; parameter list of length same as distribution\n\t\t# probability distribution of x given theta(parameters)\n\t\t# guassian\n\t\tn_features = data.shape[1] - 1\n\t\tif not self.isNaive:\n\t\t\t# Fit a multivariate Gaussian in this case\n\t\t\tself.parameters.append(Distributions.gaussian_mle(data))\n\t\t\treturn\n\t\tfor i in range(len(self.distribution)):\n\t\t\tX = np.vstack((data[:,i],data[:,-1]))\n\t\t\tX = X.transpose()\n\t\t\tdi = self.distribution[i]\n\t\t\tif di == 0:\n\t\t\t\tself.parameters.append(Distributions.gaussian_mle(X))\n\t\t\telif di ==1:\n\t\t\t\tself.parameters.append(Distributions.multinomial_mle(X))\n\t\t\telif di == -1:\n\t\t\t\tself.parameters.append(-1)\n","repo_name":"navreeetkaur/machine-learning-algorithms","sub_path":"src/Bayes.py","file_name":"Bayes.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"14354641628","text":"import torch\n\ndef train(model, train_loader, criterion, optimizer, epochs, epoch):\n model.train()\n running_loss = 0.0\n for batch_idx, (inputs, targets) in enumerate(train_loader):\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n # batch_idx += 1\n # if batch_idx % 10 == 0:\n # print(f'Epoch [{epoch+1}/{epochs}] Batch [{batch_idx}/{len(train_loader)}] Loss: {running_loss / batch_idx:.6f}')\n\n epoch_loss = running_loss / len(train_loader)\n print(f'Epoch [{epoch+1}/{epochs}] Training Loss: {epoch_loss:.6f}')\n return epoch_loss\n\ndef validate(model, val_loader, criterion, epochs, epoch):\n model.eval()\n running_loss = 0.0\n num_samples = 0\n for batch_idx, (inputs, targets) in enumerate(val_loader):\n with torch.no_grad():\n outputs = model(inputs)\n loss = criterion(outputs, 
targets)\n            running_loss += loss.item()\n            num_samples += targets.size(0)\n        # batch_idx += 1\n        # if batch_idx % 10 == 0:\n        #     print(f'Epoch [{epoch+1}/{epochs}] Batch [{batch_idx}/{len(val_loader)}] Loss: {running_loss / batch_idx:.6f}')\n\n    epoch_loss = running_loss / len(val_loader)\n    print(f'Epoch [{epoch+1}/{epochs}] Validation Loss: {running_loss/len(val_loader):.6f}')\n    return epoch_loss","repo_name":"Darrensow/drn-pytorch","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39379079477","text":"from django.shortcuts import render\nfrom .models import Students\nfrom .serializer import StudentSerializer\nfrom rest_framework.renderers import JSONRenderer\nimport io\nfrom rest_framework.parsers import JSONParser\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n# Create your views here.\n@method_decorator(csrf_exempt, name='dispatch')\nclass StudentApi(View):\n    def get(self, request, *args, **kwargs):\n        bodyData = request.body\n        streamData = io.BytesIO(bodyData)\n        pythonData = JSONParser().parse(streamData)\n        id = pythonData.get('id', None)\n        if id is not None:\n            studentData = Students.objects.get(id = id)\n            serializerData = StudentSerializer(studentData)\n            jsonData = JSONRenderer().render(serializerData.data)\n            return HttpResponse(jsonData, content_type='application/json')\n        studentData = Students.objects.all()\n        serializerData = StudentSerializer(studentData, many=True)\n        jsonData = JSONRenderer().render(serializerData.data)\n        return HttpResponse(jsonData,content_type='application/json')\n\n    def post(self, request, *args, **kwargs):\n        bodyData = request.body\n        # print(bodyData)\n        streamData = io.BytesIO(bodyData)\n        pythonData = JSONParser().parse(streamData)\n        serializerData = StudentSerializer(data = pythonData)\n        if serializerData.is_valid():\n            serializerData.save()\n            res = {'msg':'Data has been created'}\n            res_json_data = JSONRenderer().render(res)\n            \n            return HttpResponse(res_json_data,content_type='application/json')\n        # ser_data_check = serializerData.errors\n        res_json_data = JSONRenderer().render(serializerData.errors)\n        print(res_json_data)\n        return HttpResponse(res_json_data, content_type='application/json')\n    \n    def put(self, request, *args, **kwargs):\n        bodyData = request.body\n        # print(bodyData)\n        streamData = io.BytesIO(bodyData)\n        # print(streamData)\n        pythonData = JSONParser().parse(streamData)\n        print(pythonData)\n        id = pythonData.get('id')\n        # print(id)\n        studentData = Students.objects.get(id=id)\n        # Complete update - requires the client to send every field:\n        # serializerData = StudentSerializer(studentData, data=pythonData)\n        # Partial update - only the changed fields are required\n        serializerData = StudentSerializer(studentData, data=pythonData, partial=True)\n        if serializerData.is_valid():\n            serializerData.save()\n            res = {'msg':'Data has been updated'}\n            res_json_data = JSONRenderer().render(res)\n            return HttpResponse(res_json_data, content_type='application/json')\n        \n        res_json_data = JSONRenderer().render(serializerData.errors)\n        print(res_json_data)\n        return HttpResponse(res_json_data, content_type='application/json')\n\n    def delete(self, request, *args, **kwargs):\n        bodyData = request.body\n        streamData = io.BytesIO(bodyData)\n        
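# DRF's JSONParser reads from a file-like stream, hence the BytesIO wrapper around the raw request body.\n        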
pythonData = JSONParser().parse(streamData)\n        id = pythonData.get('id')\n        studentData = Students.objects.get(id=id)\n        studentData.delete()\n        res = {'msg':'Your data has been deleted'}\n        res_json_data = JSONRenderer().render(res)\n        print(res_json_data)\n        return HttpResponse(res_json_data, content_type='application/json')\n\n    \n","repo_name":"RashadGhzi/DRF-and-MESSAGE-Framework","sub_path":"api/crudWithdrf - with class based view/cruddrf_proj/cruddrf_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27953802127","text":"\"\"\"\nA recreation of Figure 5a from \n\nHeck, Kirby S., Hannah M. Johlas, and Michael F. Howland. \"Modelling the\ninduction, thrust and power of a yaw-misaligned actuator disk.\" Journal of Fluid\nMechanics 959 (2023): A9.\n\"\"\"\nfrom pathlib import Path\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom MITWake import Rotor\n\nFIGDIR = Path(\"fig\")\nFIGDIR.mkdir(parents=True, exist_ok=True)\n\nif __name__ == \"__main__\":\n    yaws = np.deg2rad(np.linspace(0, 30, 50))\n    Ctprimes = [0.2, 0.4, 0.8, 1.2, 1.6, 2.0]\n\n    fig = plt.figure()\n    plt.xlabel(\"$\\\\gamma$ [deg]\")\n    plt.ylabel(\"$P(\\\\gamma)/P(\\\\gamma=0)$\")\n\n    for i, Ct in enumerate(Ctprimes):\n        color = plt.cm.viridis(i / len(Ctprimes))\n        a, u, v = Rotor.yawthrust(Ct, yaws)\n        Pratio = ((1 + 0.25 * Ct) * (1 - a) * np.cos(yaws)) ** 3\n        plt.plot(np.rad2deg(yaws), Pratio, c=color, label=f\"$C_T'$={Ct}\")\n\n    plt.legend()\n    plt.grid()\n    plt.xlim(0, 30)\n    plt.ylim(0.6, 1.05)\n    plt.savefig(\n        FIGDIR / \"example_01_power_yaw_relationship.png\", dpi=300, bbox_inches=\"tight\"\n    )\n","repo_name":"Howland-Lab/MITWake","sub_path":"examples/example_01_power_yaw_relationship.py","file_name":"example_01_power_yaw_relationship.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5901588310","text":"import numpy as np\r\nfrom collections import namedtuple\r\nfrom intCodeFunctions import readInFile, runProgram, getArray_op\r\nimport itertools \r\n\r\ninstruction = namedtuple(\"instruction\", [\"opCode\", \"mode1\", \"mode2\", \"mode3\"])\r\n\r\ndef main(): \r\n\tinputs = [9,8,7,6,5]\r\n\t#getArray_op(\"1101\")\r\n\t#puzzle = readInFile(\"7input.txt\")\r\n\tpuzzle = np.fromstring(\"3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5\", sep = \",\", dtype = int)\r\n\r\n\t#possibilities = list(itertools.permutations([9,8,7,6,5]))\r\n\tcurrHighest = 0\r\n\t#currPos = []\r\n\t#for pos in possibilities:\r\n\t\t#currPuzzle = puzzle.copy()\r\n\t\t#thruster = 0\r\n\t\t#inputs = pos\r\n\tinp2 = 0\r\n\t\t#startIndex = 0\r\n\t\t#thruster = 0\r\n\r\n\r\n\t# run amplifiers A..E once in sequence, feeding each output into the next input\r\n\tfor i in inputs:\r\n\t\tthruster = runProgram(puzzle, [i,inp2])\r\n\t\t#thruster = thruster + output\r\n\t\tinp2 = thruster\r\n\t\t# if thruster > currHighest: \r\n\t\t# \tcurrHighest = thruster\r\n\t\t\t#currPos = pos\r\n\r\n\t# print(currHighest)\r\n\t\r\n\t\r\n\r\n# The condition is True only if this file is run as a script\n# (e.g. python3 7day.py). 
It will be False if this\n# file is used in an import statement\nif __name__ == '__main__':\n\tmain()","repo_name":"lkmsf/adventOfCode-","sub_path":"2019/7day.py","file_name":"7day.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24994673190","text":"\"\"\"\nGiven a pointer to the head of a linked list and a specific position, \ndetermine the data value at that position. Count backwards from the tail node. \nThe tail is at position 0, its parent is at 1 and so on.\n\"\"\"\n\nclass Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n    \nclass LinkedList:\n    def __init__(self):\n        self.head = None\n    \n    def getNode(self, positionFromTail):\n        def append(head):\n            current = head\n            elements = []\n            while current:\n                elements.append(current.data)\n                current = current.next\n            \n            return elements\n        \n        current = self.head\n        elements = append(current)\n        if len(elements) == 0:\n            return False\n        elif len(elements) == 1:\n            return elements[0]\n        else:\n            return elements[-positionFromTail-1]\n\nif __name__ == \"__main__\":\n    linked_list = LinkedList()\n\n    linked_list.head = Node(4)\n    three = Node(3)\n    two = Node(2)\n    one = Node(1)\n\n    linked_list.head.next = three\n    three.next = two\n    two.next = one\n\n    print(linked_list.getNode(2))","repo_name":"onursahil/HackerRank_Problems","sub_path":"Data_Structures/Linked_Lists/get_node_value.py","file_name":"get_node_value.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10400053890","text":"import logging\nfrom typing import Optional\nfrom typing import Union\n\nfrom cryptojwt.jwe.exception import JWEException\nfrom cryptojwt.jws.exception import NoSuitableSigningKeys\nfrom cryptojwt.jwt import utc_time_sans_frac\nfrom oidcmsg import oidc\nfrom oidcmsg.message import Message\nfrom oidcmsg.oauth2 import ResponseMessage\nfrom oidcmsg.oidc import RefreshAccessTokenRequest\nfrom oidcmsg.oidc import TokenErrorResponse\nfrom oidcmsg.time_util import time_sans_frac\n\nfrom oidcendpoint import sanitize\nfrom oidcendpoint.cookie import new_cookie\nfrom oidcendpoint.endpoint import Endpoint\nfrom oidcendpoint.exception import ProcessError\nfrom oidcendpoint.session import unpack_session_key\nfrom oidcendpoint.session.grant import AuthorizationCode\nfrom oidcendpoint.session.grant import Grant\nfrom oidcendpoint.session.grant import RefreshToken\nfrom oidcendpoint.session.token import Token as sessionToken\nfrom oidcendpoint.token.exception import UnknownToken\nfrom oidcendpoint.util import importer\n\nlogger = logging.getLogger(__name__)\n\n\nclass TokenEndpointHelper(object):\n    def __init__(self, endpoint, config=None):\n        self.endpoint = endpoint\n        self.config = config\n        self.endpoint_context = endpoint.endpoint_context\n        self.error_cls = self.endpoint.error_cls\n\n    def post_parse_request(self, request: Union[Message, dict],\n                           client_id: Optional[str] = \"\",\n                           **kwargs):\n        \"\"\"Context specific parsing of the request.\n        This is done after general request parsing and before processing\n        the request.\n        \"\"\"\n        raise NotImplementedError\n\n    def process_request(self, req: Union[Message, dict], **kwargs):\n        \"\"\"Acts on a process request.\"\"\"\n        raise NotImplementedError\n\n    def _mint_token(self, token_type: str, grant: Grant, session_id: str,\n                    based_on: Optional[sessionToken] = None) -> sessionToken:\n        _mngr = self.endpoint_context.session_manager\n        
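# Usage rules for this token type may define an \"expires_in\" lifetime, applied to the minted token below.\n        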
usage_rules = grant.usage_rules.get(token_type)\n if usage_rules:\n _exp_in = usage_rules.get(\"expires_in\")\n else:\n _exp_in = 0\n\n token = grant.mint_token(\n session_id,\n endpoint_context=self.endpoint_context,\n token_type=token_type,\n token_handler=_mngr.token_handler[\"access_token\"],\n based_on=based_on,\n usage_rules=usage_rules\n )\n\n if _exp_in:\n if isinstance(_exp_in, str):\n _exp_in = int(_exp_in)\n\n if _exp_in:\n token.expires_at = time_sans_frac() + _exp_in\n\n self.endpoint_context.session_manager.set(\n unpack_session_key(session_id), grant)\n\n return token\n\n\nclass AccessTokenHelper(TokenEndpointHelper):\n def process_request(self, req: Union[Message, dict], **kwargs):\n \"\"\"\n\n :param req:\n :param kwargs:\n :return:\n \"\"\"\n _context = self.endpoint.endpoint_context\n\n _mngr = _context.session_manager\n _log_debug = logger.debug\n\n if req[\"grant_type\"] != \"authorization_code\":\n return self.error_cls(\n error=\"invalid_request\", error_description=\"Unknown grant_type\"\n )\n\n try:\n _access_code = req[\"code\"].replace(\" \", \"+\")\n except KeyError: # Missing code parameter - absolutely fatal\n return self.error_cls(\n error=\"invalid_request\", error_description=\"Missing code\"\n )\n\n _session_info = _mngr.get_session_info_by_token(_access_code, grant=True)\n grant = _session_info[\"grant\"]\n\n code = grant.get_token(_access_code)\n _authn_req = grant.authorization_request\n\n # If redirect_uri was in the initial authorization request\n # verify that the one given here is the correct one.\n if \"redirect_uri\" in _authn_req:\n if req[\"redirect_uri\"] != _authn_req[\"redirect_uri\"]:\n return self.error_cls(\n error=\"invalid_request\", error_description=\"redirect_uri mismatch\"\n )\n\n _log_debug(\"All checks OK\")\n\n issue_refresh = False\n if \"issue_refresh\" in kwargs:\n issue_refresh = kwargs[\"issue_refresh\"]\n else:\n if \"offline_access\" in grant.scope:\n issue_refresh = True\n\n _response = {\n \"token_type\": \"Bearer\",\n \"scope\": grant.scope,\n }\n\n token = self._mint_token(token_type=\"access_token\",\n grant=grant,\n session_id=_session_info[\"session_id\"],\n based_on=code)\n _response[\"access_token\"] = token.value\n _response[\"expires_in\"] = token.expires_at - utc_time_sans_frac()\n\n if issue_refresh:\n refresh_token = self._mint_token(token_type=\"refresh_token\",\n grant=grant,\n session_id=_session_info[\"session_id\"],\n based_on=code)\n _response[\"refresh_token\"] = refresh_token.value\n\n code.register_usage()\n\n # since the grant content has changed\n _mngr[_session_info[\"session_id\"]] = grant\n\n if \"openid\" in _authn_req[\"scope\"]:\n try:\n _idtoken = _context.idtoken.make(_session_info[\"session_id\"])\n except (JWEException, NoSuitableSigningKeys) as err:\n logger.warning(str(err))\n resp = self.error_cls(\n error=\"invalid_request\",\n error_description=\"Could not sign/encrypt id_token\",\n )\n return resp\n\n _response[\"id_token\"] = _idtoken\n\n return _response\n\n def post_parse_request(self, request: Union[Message, dict],\n client_id: Optional[str] = \"\",\n **kwargs):\n \"\"\"\n This is where clients come to get their access tokens\n\n :param request: The request\n :param client_id: Client identifier\n :returns:\n \"\"\"\n\n _mngr = self.endpoint_context.session_manager\n try:\n _session_info = _mngr.get_session_info_by_token(request[\"code\"],\n grant=True)\n except (KeyError, UnknownToken):\n logger.error(\"Access Code invalid\")\n return self.error_cls(error=\"invalid_grant\",\n 
error_description=\"Unknown code\")\n\n _grant = _session_info[\"grant\"]\n code = _grant.get_token(request[\"code\"])\n if not isinstance(code, AuthorizationCode):\n return self.error_cls(\n error=\"invalid_request\", error_description=\"Wrong token type\"\n )\n\n if code.is_active() is False:\n return self.error_cls(\n error=\"invalid_request\", error_description=\"Code inactive\"\n )\n\n _auth_req = _grant.authorization_request\n\n if \"client_id\" not in request: # Optional for access token request\n request[\"client_id\"] = _auth_req[\"client_id\"]\n\n logger.debug(\"%s: %s\" % (request.__class__.__name__, sanitize(request)))\n\n return request\n\n\nclass RefreshTokenHelper(TokenEndpointHelper):\n def process_request(self, req: Union[Message, dict], **kwargs):\n _mngr = self.endpoint_context.session_manager\n\n if req[\"grant_type\"] != \"refresh_token\":\n return self.error_cls(\n error=\"invalid_request\", error_description=\"Wrong grant_type\"\n )\n\n token_value = req[\"refresh_token\"]\n _session_info = _mngr.get_session_info_by_token(token_value, grant=True)\n\n _grant = _session_info[\"grant\"]\n token = _grant.get_token(token_value)\n\n access_token = self._mint_token(token_type=\"access_token\",\n grant=_grant,\n session_id=_session_info[\"session_id\"],\n based_on=token)\n\n _resp = {\n \"access_token\": access_token.value,\n \"token_type\": \"Bearer\",\n \"scope\": _grant.scope\n }\n\n if access_token.expires_at:\n _resp[\"expires_in\"] = access_token.expires_at - utc_time_sans_frac()\n\n _mints = token.usage_rules.get(\"supports_minting\")\n if \"refresh_token\" in _mints:\n refresh_token = self._mint_token(token_type=\"refresh_token\",\n grant=_grant,\n session_id=_session_info[\"session_id\"],\n based_on=token)\n refresh_token.usage_rules = token.usage_rules.copy()\n _resp[\"refresh_token\"] = refresh_token.value\n\n if \"id_token\" in _mints:\n try:\n _idtoken = self.endpoint_context.idtoken.make(_session_info[\"session_id\"])\n except (JWEException, NoSuitableSigningKeys) as err:\n logger.warning(str(err))\n resp = self.error_cls(\n error=\"invalid_request\",\n error_description=\"Could not sign/encrypt id_token\",\n )\n return resp\n\n _resp[\"id_token\"] = _idtoken\n\n return _resp\n\n def post_parse_request(self, request: Union[Message, dict],\n client_id: Optional[str] = \"\",\n **kwargs):\n \"\"\"\n This is where clients come to refresh their access tokens\n\n :param request: The request\n :param authn: Authentication info, comes from HTTP header\n :returns:\n \"\"\"\n\n request = RefreshAccessTokenRequest(**request.to_dict())\n\n try:\n keyjar = self.endpoint_context.keyjar\n except AttributeError:\n keyjar = \"\"\n\n request.verify(keyjar=keyjar, opponent_id=client_id)\n\n _mngr = self.endpoint_context.session_manager\n try:\n _session_info = _mngr.get_session_info_by_token(\n request[\"refresh_token\"], grant=True\n )\n except KeyError:\n logger.error(\"Access Code invalid\")\n return self.error_cls(error=\"invalid_grant\")\n\n _grant = _session_info[\"grant\"]\n token = _grant.get_token(request[\"refresh_token\"])\n\n if not isinstance(token, RefreshToken):\n return self.error_cls(\n error=\"invalid_request\", error_description=\"Wrong token type\"\n )\n\n if token.is_active() is False:\n return self.error_cls(\n error=\"invalid_request\", error_description=\"Refresh token inactive\"\n )\n\n return request\n\n\nHELPER_BY_GRANT_TYPE = {\n \"authorization_code\": AccessTokenHelper,\n \"refresh_token\": RefreshTokenHelper,\n}\n\n\nclass Token(Endpoint):\n 
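# OAuth2/OIDC token endpoint; per-grant_type processing is delegated to the helper classes registered in HELPER_BY_GRANT_TYPE above.\n    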
request_cls = oidc.Message\n response_cls = oidc.AccessTokenResponse\n error_cls = TokenErrorResponse\n request_format = \"json\"\n request_placement = \"body\"\n response_format = \"json\"\n response_placement = \"body\"\n endpoint_name = \"token_endpoint\"\n name = \"token\"\n default_capabilities = {\"token_endpoint_auth_signing_alg_values_supported\": None}\n\n def __init__(self, endpoint_context, new_refresh_token=False, **kwargs):\n Endpoint.__init__(self, endpoint_context, **kwargs)\n self.post_parse_request.append(self._post_parse_request)\n if \"client_authn_method\" in kwargs:\n self.endpoint_info[\"token_endpoint_auth_methods_supported\"] = kwargs[\n \"client_authn_method\"\n ]\n self.allow_refresh = False\n self.new_refresh_token = new_refresh_token\n self.configure_grant_types(kwargs.get(\"grant_types_supported\"))\n\n def configure_grant_types(self, grant_types_supported):\n if grant_types_supported is None:\n self.helper = {k: v(self) for k, v in HELPER_BY_GRANT_TYPE.items()}\n return\n\n self.helper = {}\n # TODO: do we want to allow any grant_type?\n for grant_type, grant_type_options in grant_types_supported.items():\n if (\n grant_type_options in (None, True)\n and grant_type in HELPER_BY_GRANT_TYPE\n ):\n self.helper[grant_type] = HELPER_BY_GRANT_TYPE[grant_type]\n continue\n elif grant_type_options is False:\n continue\n\n try:\n grant_class = grant_type_options[\"class\"]\n except (KeyError, TypeError):\n raise ProcessError(\n \"Token Endpoint's grant types must be True, None or a dict with a\"\n \" 'class' key.\"\n )\n _conf = grant_type_options.get(\"kwargs\", {})\n\n if isinstance(grant_class, str):\n try:\n grant_class = importer(grant_class)\n except (ValueError, AttributeError):\n raise ProcessError(\n f\"Token Endpoint's grant type class {grant_class} can't\"\n \" be imported.\"\n )\n try:\n self.helper[grant_type] = grant_class(self, _conf)\n except Exception as e:\n raise ProcessError(f\"Failed to initialize class {grant_class}: {e}\")\n\n def _post_parse_request(self, request: Union[Message, dict],\n client_id: Optional[str] = \"\", **kwargs):\n _helper = self.helper.get(request[\"grant_type\"])\n if _helper:\n return _helper.post_parse_request(request, client_id, **kwargs)\n else:\n return self.error_cls(\n error=\"invalid_request\",\n error_description=f\"Unsupported grant_type: {request['grant_type']}\"\n )\n\n def process_request(self, request: Optional[Union[Message, dict]] = None, **kwargs):\n \"\"\"\n\n :param request:\n :param kwargs:\n :return: Dictionary with response information\n \"\"\"\n if isinstance(request, self.error_cls):\n return request\n\n if request is None:\n return self.error_cls(error=\"invalid_request\")\n\n try:\n _helper = self.helper.get(request[\"grant_type\"])\n if _helper:\n response_args = _helper.process_request(request, **kwargs)\n else:\n return self.error_cls(\n error=\"invalid_request\",\n error_description=f\"Unsupported grant_type: {request['grant_type']}\"\n )\n except JWEException as err:\n return self.error_cls(error=\"invalid_request\", error_description=\"%s\" % err)\n\n if isinstance(response_args, ResponseMessage):\n return response_args\n\n _access_token = response_args[\"access_token\"]\n _session_info = self.endpoint_context.session_manager.get_session_info_by_token(\n _access_token, grant=True)\n\n _cookie = new_cookie(\n self.endpoint_context,\n sub=_session_info[\"grant\"].sub,\n cookie_name=self.endpoint_context.cookie_name[\"session\"],\n )\n\n _headers = [(\"Content-type\", \"application/json\")]\n resp 
= {\"response_args\": response_args, \"http_headers\": _headers}\n if _cookie:\n resp[\"cookie\"] = _cookie\n return resp\n","repo_name":"IdentityPython/oidcendpoint","sub_path":"src/oidcendpoint/oidc/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":15545,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"70248704968","text":"import os\n\nimport imp\nimport inspect\n\nimport tkinter\n\nfrom pysweep import Menu, GameDisplay#, VideoFile\n\nclass PySweep:\n def __init__(self, master):\n self.master = master\n self.bind_events = []\n self.bind_protocols = []\n self.widget_bindname = \"pysweep\"\n self.bindable_widgets = {\"pysweep\": {\"bindevent\": self.bind_tkinter_event, \"bindprotocol\": self.bind_tkinter_protocol}}\n\n # Init our own stuff: Menu and GameDisplay\n self.gamedisplay = GameDisplay(master, self)\n Menu.init_menu(master)\n\n self.load_pysweep_mods()\n\n # this is a bindable_widgets so it must implement these two functions.\n # The \"name\" of the bindable widget is also the prefix to the event_name used for the hook.\n # In this case, it's \"pysweep\".\n def bind_tkinter_event(self, event_name):\n if event_name not in self.bind_events:\n hook = (self.widget_bindname, event_name)\n self.master.bind(event_name, lambda e,hook=hook: self.handle_event(hook, e))\n self.bind_events.append(event_name)\n\n def bind_tkinter_protocol(self, protocol_name):\n if protocol_name not in self.bind_protocols:\n hook = (self.widget_bindname, protocol_name)\n self.master.protocol(protocol_name, lambda e=None,hook=hook: self.handle_event(hook, e))\n self.bind_protocols.append(protocol_name)\n\n def load_pysweep_mods(self):\n # Mods dictionary\n # Key is the package name (directory name if package, filename without .py extension if file)\n # Value is a tuple containing the path to the module and either imp.PY_SOURCE or imp.PKG_DIRECTORY\n self.mods_path_dict = {}\n\n # Mod classes dict\n # Key is mod name, value is the mod's class\n self.mod_classes = {}\n\n # Dict of mods we've loaded (name: moduleinstance)\n self.mods = {}\n\n # A dictionary with hook strings as keys and a list of callbacks as values\n self.hooks = {}\n\n # load mods here\n alreadyfound = self.find_mods('mods')\n print(\"Files found: {}\".format(alreadyfound))\n print(\"Mods found: {}\".format(self.mods_path_dict))\n\n # Paths -> Classes\n for package_name, mod in self.mods_path_dict.items():\n module, _ = self.import_mod(package_name, mod[0], mod[1])\n if hasattr(module, \"mods\"):\n for modname, modclass in module.mods.items():\n self.mod_classes[modname] = modclass\n else:\n print(\"Could not find mods in {}\".format(package_name))\n\n # Classes -> Instances\n for name, modclass in self.mod_classes.items():\n print(\"Attempting to load mod {}\".format(name))\n moduleinstance = self.load_mod(modclass, name)\n if moduleinstance != None:\n print(\"Loaded mod {}\".format(name))\n else:\n print(\"{} is an invalid mod\".format(name))\n\n print(\"Mods loaded: {}\".format(list(self.mods.keys())))\n print(\"Registering bindings\")\n for name, mod in self.mods.items():\n self.register_bindings(mod)\n\n self.handle_event((\"pysweep\", \"AllModsLoaded\"), None)\n\n def is_mod(self, path):\n if os.path.isdir(path):\n return self.is_package_mod(path)\n elif os.path.isfile(path):\n return self.is_file_mod(path)\n\n def is_package_mod(self, path):\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n def is_file_mod(self, path):\n return 
path.endswith('.py') and not os.path.basename(path).startswith('_')\n\n def ignore_dir(self, path):\n return (os.path.basename(path).startswith(\"_\") or os.path.basename(path).startswith(\".\"))\n\n def ignore_file(self, path):\n return os.path.basename(path).startswith(\".\")\n\n def load_mod(self, moduleclass, name):\n if inspect.isclass(moduleclass):\n moduleinstance = moduleclass(self.master, self)\n if (hasattr(moduleinstance, \"hooks\") and isinstance(moduleclass.hooks, dict)):\n self.mods[name] = moduleinstance\n self.register_hooks(moduleinstance)\n return moduleinstance\n return None\n\n def register_bindings(self, moduleinstance):\n if hasattr(moduleinstance, \"required_events\"):\n for widget_name, event_name in moduleinstance.required_events:\n self.bindable_widgets[widget_name][\"bindevent\"](event_name)\n if hasattr(moduleinstance, \"required_protocols\"):\n for widget_name, protocol_name in moduleinstance.required_protocols:\n self.bindable_widgets[widget_name][\"bindprotocol\"](protocol_name)\n\n def register_hooks(self, moduleinstance):\n # Check if they have any widgets they want to allow other mods to bind to.\n if hasattr(moduleinstance, \"bindable_widgets\"):\n for name, widget in moduleinstance.bindable_widgets.items():\n self.bindable_widgets[name] = widget\n\n # Then register their callbacks into our hooks dict\n for hook in moduleinstance.hooks:\n if hook in self.hooks:\n self.hooks[hook].extend(moduleinstance.hooks[hook])\n else:\n self.hooks[hook] = moduleinstance.hooks[hook]\n\n def find_mods(self, path, alreadyfound=[]):\n # Finds mods in path recursively\n # alreadyfound keeps track of every file and directory we've accessed\n for mod in os.listdir(path):\n mod = os.path.join(path, mod)\n\n if os.path.isdir(mod) and not self.ignore_dir(mod):\n # DIRECTORY\n for found in alreadyfound:\n if os.path.samefile(mod, found):\n print(\"Already found: {}\".format(mod))\n break\n else:\n # this mod is not yet found\n print(\"Found: {}\".format(mod))\n alreadyfound.append(mod)\n if self.is_package_mod(mod):\n modname = os.path.basename(mod)\n self.mods_path_dict[modname] = (mod, imp.PKG_DIRECTORY)\n else:\n # Was not a package mod, recurse to find more mods inside\n alreadyfound = self.find_mods(mod, alreadyfound)\n\n elif os.path.isfile(mod) and not self.ignore_file(mod):\n # FILE\n for found in alreadyfound:\n if os.path.samefile(mod, found):\n print(\"Already found: {}\".format(mod))\n break\n else:\n # this mod is not yet found\n print(\"Found: {}\".format(mod))\n alreadyfound.append(mod)\n if self.is_file_mod(mod):\n modname = os.path.basename(mod)[:-3]\n self.mods_path_dict[modname] = (mod, imp.PY_SOURCE)\n return alreadyfound\n\n def import_mod(self, name, path, type_):\n if type_ == imp.PY_SOURCE:\n with open(path) as mod:\n module = imp.load_module(name, mod, path, ('.py', 'U', type_))\n elif type_ == imp.PKG_DIRECTORY:\n module = imp.load_module(name, None, path, ('', '', type_))\n else:\n raise TypeError('Unsupported module type')\n return module, os.path.getmtime(path)\n\n def handle_event(self, hookname, event):\n if hookname in self.hooks:\n for callab in self.hooks[hookname]:\n callab(hookname, event)\n","repo_name":"thomastanck/pysweeper","sub_path":"pysweep/pysweep.py","file_name":"pysweep.py","file_ext":"py","file_size_in_byte":7619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10069751381","text":"from typing import List, Tuple, Any, Dict\nimport re\n\n\ndef extract_defendants(sentences: 
List, tags: List, index: int, text: List[str]) -> Tuple[List, int]:\n    defendants = []\n\n    if not re.search('^[1-9].', text[index]):\n        defendant = ''.join([i for i in text[index] if not i.isdigit() and i != '.'])\n        defendants.append(defendant)\n        sentences.append([defendant])\n        tags.append(['Defendant'])\n        return defendants, index + 1\n\n    while re.search('^[1-9].', text[index]):\n        defendant = ''.join([i for i in text[index] if not i.isdigit() and i != '.'])\n        defendants.append(defendant)\n        sentences.append([defendant])\n        tags.append(['Defendant'])\n        index += 1\n\n    return defendants, index + 1\n\n\ndef extract_prosecutors(sentences: List, tags: List, index: int, text: List[str]) -> Tuple[List, int]:\n    prosecutors = []\n\n    while text[index] != 'נ' and text[index] != 'נ ג ד':\n        prosecutor = ''.join([i for i in text[index] if not i.isdigit() and i != '.'])\n        prosecutors.append(prosecutor)\n        sentences.append([prosecutor])\n        tags.append(['Prosecutor'])\n        index += 1\n\n    sentences.append(text[index])\n    tags.append(['O'])\n\n    return prosecutors, index+4\n\n\ndef extract_judges(sentences: List, tags: List, text: List[str]) -> Tuple[List, int]:\n    judges, flag = [], False\n\n    for i, raw_text in enumerate(text):\n        if flag:\n            if re.search(':$', raw_text):\n                sentences.append(raw_text.split(\" \"))\n                tags.append(['O'] * len(raw_text.split(\" \")))\n                return judges, i+1\n\n        if re.search(\"^כבוד\", raw_text):\n            flag = True\n            sentences.append([raw_text])\n            judges.append(raw_text)\n            tags.append(['Judge'])\n        else:\n            sentences.append(raw_text.split(\" \"))\n            tags.append(['O'] * len(raw_text.split(\" \")))\n\n\ndef extract_date(sentences: List, tags: List, index: int, text: List[str]) -> Any:\n\n    while ('ניתן היום' not in text[index]) and \\\n            not re.search(\"\\u200f[0-9]+[.][0-9]+[.][0-9][0-9]+\", text[index]) and \\\n            ('תאריך הישיבה:' not in text[index]):\n        sentences.append(text[index].split(\" \"))\n        tags.append(['O'] * len(text[index].split(\" \")))\n        index += 1\n        if index == len(text):\n            return None\n\n    if re.search(\"\\u200f[1-9][0-9]*[.][1-9][0-9]*[.][0-9][0-9]+\",text[index]):\n        s = re.search(\"\\u200f[1-9][0-9]*[.][1-9][0-9]*[.][0-9][0-9]+\",text[index])\n\n        if s:\n            sentences.append([s[0]])\n            tags.append(['Date'])\n            return s[0].replace('\\u200f', '')\n        else:\n            return None\n\n    elif 'ניתן היום' in text[index] or 'תאריך הישיבה:' in text[index]:\n        while not re.search(\"[1-9][0-9]*[.][1-9][0-9]*[.][0-9][0-9]+\",text[index]):\n            sentences.append(text[index].split(\" \"))\n            tags.append(['O'] * len(text[index].split(\" \")))\n            index += 1\n            if index == len(text):\n                return None\n\n        sentences.append([re.search(\"[1-9][0-9]*[.][1-9][0-9]*[.][0-9][0-9]+\",text[index])[0]])\n        tags.append(['Date'])\n        return re.search(\"[1-9][0-9]*[.][1-9][0-9]*[.][0-9][0-9]+\",text[index])[0]\n\n    else:\n        return None\n\n\ndef extract_ner(docs: Dict[str, List[str]]) -> Tuple[Dict, Dict]:\n    names, judges, prosecutors, defendants, dates, sentences, tags = [], [], [], [], [], [], []\n\n    for name, doc in docs.items():\n        names.append(name)\n\n        judges_tup = extract_judges(sentences, tags, doc)\n        judges.append(judges_tup[0])\n\n        prosecutors_tup = extract_prosecutors(sentences, tags, judges_tup[1], doc)\n        prosecutors.append(prosecutors_tup[0])\n\n        defendants_tup = extract_defendants(sentences, tags, prosecutors_tup[1], doc)\n        defendants.append(defendants_tup[0])\n\n        date = extract_date(sentences, tags, defendants_tup[1], doc)\n        dates.append(date)\n\n    return {\n        'Document' : names,\n        'Judges' : judges,\n        'Prosecutors' : prosecutors,\n        'Defendants' : 
defendants,\n        'Dates' : dates\n    }, {\n        'Sentences' : sentences,\n        'Tags' : tags\n    }\n\n\n","repo_name":"Aviad-Hedvat/NLP_FinalProject","sub_path":"FinalProject/ScrappingExtraction/DetailsExtraction.py","file_name":"DetailsExtraction.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16024429722","text":"#yield from\n\n\ndef count():\n    print('new count ----')\n    total = 0.0\n    avg = 0.0\n    num = 0\n    while True:\n        data = yield avg\n        if not data:\n            break\n        total += data\n        num += 1\n        avg = total / num\n        print(avg)\n    print('count finished')\n    return num, avg\n\n\ndef pipe(res, key):\n    print('new coroutine')\n    while True:\n        # try:\n        yield from count()\n        # print('finished aggregating data for {}'.format(key))\n\n        # except StopIteration:\n        #     print('cannot run any more')\n\n\ndef start(data):\n    res = {}\n    for k, v in data.items():\n        # print(k,v)\n        group = pipe(res, k)\n        next(group)\n        for v1 in v:\n            group.send(v1)\n        group.send(None)\n    # print(res)\n\n\n# scores for each class\ndata = {\n    'Class A': [90, 89, 60, 68],\n    'Class B': [90, 13, 43, 13, 53],\n    'Class C': [12, 86, 43, 34, 89, 98, 89]\n}\n\n#1. yield from\n\ndef yield1(n):\n    for i in range(n):\n        yield i\n\ndef yield2(n):\n    yield from range(n)\n# yield1 and yield2 are interchangeable\n#\n# print(next(y))\n# print(next(y))\n# print(next(y))\n# yield from can iterate over any iterable\n# everything is an object\n# e.g. string objects\n# objects expose many methods\n# __iter__ or __next__() / __getitem__()\n\nx = \"adsf\"\nnum = 23232323\nfor i in x:\n    print(i)\nfor i in yield2(100000):\n    print(i)\n#return\n\n# the second use of yield from: it builds a pipeline, so the caller can drive the inner generator directly","repo_name":"nie000/mylinuxlearn","sub_path":"rimi_linux_mysql/tcp_ip_socket/socket_chat/p1805/yields/test10.py","file_name":"test10.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23129658342","text":"import requests\n\nresponse = requests.get('https://api.github.com/search/repositories', params={'q': 'requests+language:python'})\n\ncode = response.status_code\n\nif code == 200:\n    print(\"Success\")\n    headers = response.headers\n\n    for header_item in headers:\n        print(f\"{header_item}: {headers[header_item]}\")\n\n    json = response.json()\n    for json_item in json:\n        print(json_item)\nelse:\n    print(\"Fail\")","repo_name":"devskyfly/programming","sub_path":"Python/robot/req.py","file_name":"req.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11608130103","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@author: Rosen\n@file: operation.py\n@time: 10/14/16 5:24 PM\n\"\"\"\nimport paramiko\n\n\nclass operation(object):\n\tdef __init__(self, name, private_key_path):\n\t\tself.__ssh = paramiko.SSHClient()\n\t\tself.__key = paramiko.RSAKey.from_private_key_file(private_key_path)\n\t\tself.username = name\n\t\tself.port = 22\n\n\tdef output(self, cmd):\n\t\tstdin, stdout, stderr = self.__ssh.exec_command(cmd)\n\t\tfor i in [stdout, stderr]:\n\t\t\tif i:\n\t\t\t\tprint(i.read())\n\n\tdef once(self, host):\n\t\tself.__ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\t\tself.__ssh.connect(host, self.port, self.username, pkey=self.__key)\n\t\tFlag = True\n\t\twhile Flag:\n\t\t\tcmd = self.loop_cmd()\n\t\t\tself.output(cmd)\n\t\t\tif cmd == 'exit':\n\t\t\t\tself.__ssh.close()\n\t\t\t\tFlag = False\n\n\tdef batch(self, host, cmd):\n\t\tself.__ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\t\tself.__ssh.connect(host, self.port, 
self.username, pkey=self.__key)\n\t\tself.output(cmd)\n\t\tself.__ssh.close()\n\n\t@staticmethod\n\tdef loop_cmd():\n\t\tinp = raw_input('Please input the command to be executed (enter \"exit\" to quit): ')\n\t\treturn inp\n\n\nif __name__ == '__main__':\n\tpass\n","repo_name":"rosenlo/notes","sub_path":"python/codes/python/thread/paramiko_demo/HostManage/hostmanage/models/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"5891347577","text":"\"\"\"Implement an algorithm to determine if a string has all unique characters\"\"\"\n\nCHARSET_LENGTH = 128\n\n# time complexity O(n), space complexity O(c) being c the charset length\ndef is_string_unique(string):\n    if len(string) > CHARSET_LENGTH:\n        return False\n\n    chars_dic = {}\n    for char in string:\n        if char in chars_dic:\n            return False\n        chars_dic[char] = True\n\n    return True\n\n\n# time complexity O(n log n), space complexity O(c) being c the charset length\n\"\"\"What if you cannot use additional data structures?\"\"\"\ndef is_string_unique_2(string):\n    if len(string) > CHARSET_LENGTH:\n        return False\n\n    sorted_string = ''.join(sorted(string))\n    for index, char in enumerate(sorted_string[:-1]):\n        next_char = sorted_string[index + 1]\n        if char == next_char:\n            return False\n\n    return True\n\n","repo_name":"Razboi/competitive-programming","sub_path":"python/is_string_unique.py","file_name":"is_string_unique.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3563155179","text":"'''\nCreated on Nov 4, 2015\n\n@author: krgupta\n'''\n\ntry:\n    from ConfigParser import SafeConfigParser\n    from ConfigParser import NoSectionError\nexcept ImportError:\n    from configparser import SafeConfigParser\n    from configparser import NoSectionError\n\nimport os\nimport sys\nimport logging\n#from __future__ import print_function\n\nlogger = logging.getLogger(__name__)\n\nclass helper():\n    __parser = \"null\"\n    __propertyfilename = \"null\"\n    __initialized = False\n    \n    @staticmethod\n    def getparser():\n        return helper.__parser\n    \n    @staticmethod\n    def getpropertyfile():\n        return helper.__propertyfilename\n\n    @staticmethod\n    def setpropertyfile(propertyfilename):\n        if (propertyfilename == 'null' or os.path.isfile(propertyfilename) == False):\n            helper.__propertyfilename = 'null'\n        else:\n            helper.__propertyfilename = propertyfilename\n        return\n\n    @staticmethod\n    def __classinitialized():\n        return helper.__initialized\n    \n    @staticmethod\n    def getproperty(propertyname):\n        stringvalue = \"null\"\n\n        if ('null' != helper.getpropertyfile()):\n            if (False == helper.__classinitialized()):\n                if ('null' == helper.getparser()):\n                    try:\n                        helper.__parser = SafeConfigParser({\"http\":\"\",\"https\":\"\",\"ftp\":\"\"})\n                    except:\n                        logger.debug(\"Parser could not be initialized\")\n\n                if ('null' != helper.getparser()):\n                    try:\n                        helper.getparser().read(helper.__propertyfilename)\n                        helper.__initialized = True\n                    except:\n                        logger.debug(\"Unable to load the property file\")\n\n        if (True == helper.__classinitialized()):\n            try:\n                stringvalue = helper.getparser().get(\"properties\", propertyname)\n            except:\n                logger.debug(\"'%s' not found\\n\" %propertyname )\n        \n        if ( \"null\" == stringvalue):\n            stringvalue = os.getenv(propertyname)\n        return stringvalue 
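\n\n# Illustrative usage (the property file name below is hypothetical):\n#   helper.setpropertyfile('merchant.properties')\n#   login_id = helper.getproperty('api.login.id')  # reads the [properties] section, falling back to os.getenv('api.login.id')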
","repo_name":"AuthorizeNet/sdk-python","sub_path":"authorizenet/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"16"} +{"seq_id":"71437726088","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog as fd\r\nfrom tkinter.messagebox import showinfo\r\nfrom tkinter import font\r\nimport compressor\r\nimport os\r\n\r\nroot = tk.Tk()\r\nroot.title('File Compressor')\r\nroot.resizable(True, True)\r\nroot.geometry('500x200')\r\n\r\n#default file size\r\nfileCompressSize = 8\r\nnewDir = 'Finished Compressed Files'\r\nparentDir = os.getcwd()\r\npath = os.path.join(parentDir, newDir)\r\nif not os.path.isdir(path):\r\n os.mkdir(path)\r\n\r\n\r\ndef select_file():\r\n filetypes = (\r\n ('All files', '*.*'),\r\n ('mp4 files', '*.mp4'),\r\n ('jpg files', '*.jpg'),\r\n ('png files', '*.png')\r\n )\r\n\r\n filename = fd.askopenfilenames(\r\n title = 'Open a file',\r\n initialdir = '/',\r\n filetypes = filetypes\r\n )\r\n if filename:\r\n for fileLocation in filename:\r\n file = os.path.basename(fileLocation)\r\n fileTitle = file.split('.')\r\n fileEnd = fileTitle[-1]\r\n fileTitle = fileTitle[0]\r\n compressor.compress_video(fileLocation,f'{path}\\{fileTitle}_%03d.{fileEnd}', fileCompressSize *1000 , fileEnd)\r\n \r\n\r\n message = 'Successful Compression'\r\n else:\r\n message = 'No File Selected'\r\n showinfo(\r\n title = 'File Compressor',\r\n message = message\r\n )\r\n\r\n\r\ndef selFileSize(size):\r\n labelSize ['text'] = 'Current Size: '+ str(size)+\"mb\"\r\n global fileCompressSize\r\n fileCompressSize = size\r\n\r\n\r\n\r\nlabelTitle = tk.Label(text=\"Welcome to FFMPEG Compressor\", fg=\"black\")\r\nlabelSize = tk.Label(text=\"Select a file size.\", fg=\"black\")\r\nlabelInfo = tk.Label(text=\"*PNGs will compress as best as possible\", fg=\"black\")\r\n\r\n\r\n\r\nopen_button = ttk.Button(\r\n root,\r\n text = 'Open a File',\r\n command = select_file\r\n)\r\n\r\nsize8mb = ttk.Button(\r\n root,\r\n text = '8mb',\r\n command = lambda: selFileSize(8)\r\n)\r\nsize50mb = ttk.Button(\r\n root,\r\n text = '50 mb',\r\n command = lambda: selFileSize(50)\r\n)\r\n\r\nlabelTitle.pack()\r\nf = font.Font(labelTitle, labelTitle.cget(\"font\"))\r\nf.configure(underline=True)\r\nlabelTitle.configure(font=(f,20))\r\nlabelSize.pack()\r\nlabelInfo.pack()\r\nsize8mb.pack()\r\nsize50mb.pack()\r\nopen_button.pack(expand=True)\r\n\r\n\r\nroot.mainloop()\r\n\r\n\r\n","repo_name":"MamczynskiMichael/FFmpeg-8mb-Compressor","sub_path":"FFmpeg8mbCompressor/compressorGUI.py","file_name":"compressorGUI.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21013387014","text":"#!/usr/bin/env python3\n\"\"\"\nPrepares inputs for the next round of match iteration. 
This script assumes that a directory containing Pymol sessions of\nquality matches named \"[match_name]-pretty.pse\" exists.\n\nThis script will generate the following:\n\n    * New directory with copies of quality match .pdbs from <match_dir>\n    * .csv and .txt files required for BSFF matching iterations\n\nIdea: generate new posfiles for iterations where only positions directly adjacent to the ligand position are considered.\nThis should cut down a lot of wasted time considering match positions we know will not be productive.\n\nUsage:\n    prepare_match_iteration <pymol_dir> <match_dir> <scaffold_file> <ligand_ID> [--monomer] [--posfile]\n\nArguments:\n    <pymol_dir>\n    Path to directory containing Pymol sessions of quality matches\n\n    <match_dir>\n    Path to original PDB with natively bound target ligand\n\n    <scaffold_file>\n    Path to file containing list of scaffold PDBs\n\n    <ligand_ID>\n    Three-letter ligand code\n\n    --monomer -m\n        Monomer matches\n\n    --posfile -p\n        Generate new posfiles for iteration\n\"\"\"\nimport docopt\nimport shutil\nimport os\nimport re\n\nimport pandas as pd\nimport prody\n\ndef copy_matches(pymol_dir, match_dir, final_dir_name='Matches-Final'):\n    \"\"\"\n    Copies quality matches from match_dir\n    :return:\n    \"\"\"\n    # New directory for final match picks\n    final_match_dir = os.path.join(os.getcwd(), final_dir_name)\n    os.makedirs(final_match_dir, exist_ok=True)\n\n    # Iterate through pymol_dir\n    quality_match_list = []\n    for pymol_session in os.listdir(pymol_dir):\n        if pymol_session.endswith('.pse'):\n            quality_match_list.append(pymol_session.replace('-pretty.pse', '.pdb'))\n    print('\\nFound {0} quality matches in {1}...\\n'.format(len(quality_match_list), pymol_dir))\n\n    # Move matches in quality_match_list from match_dir into final_match_dir\n    moved_matches = []\n    for match in quality_match_list:\n        src = os.path.join(match_dir, match)\n        if os.path.exists(src):\n            dst = os.path.join(final_match_dir, match)\n            shutil.copy2(src, dst)\n            moved_matches.append(match)\n\n    # Report\n    missing_matches = set(quality_match_list) - set(moved_matches)\n    if len(missing_matches) == 0:\n        print('All quality matches transferred successfully to {0}!'.format(final_match_dir))\n    else:\n        print('The following matches were not found in {0}:'.format(match_dir))\n        for missing in missing_matches:\n            print(missing)\n\n    print('\\n')\n\n    return final_match_dir\n\n\ndef generate_iteration_inputs(final_match_dir, output_name_prefix='gurobi_constraints-residue_count', monomer=False):\n    \"\"\"\n    Generate .csv and .txt for match iteration\n    :return:\n    \"\"\"\n    df_dict_list = []\n\n    # Generate csv\n    for match in os.listdir(final_match_dir):\n        if match.endswith('.pdb'):\n            conformer_name, constraint_resnums = find_conformer_and_constraint_resnums(match, monomer=monomer)\n            scaffold = re.split('_|-|\\\\.', match)[4]\n            constraint = '{}-{}'.format(conformer_name, '1_' + '_'.join([str(a) for a in constraint_resnums]))\n\n            temp_dict = {'scaffold': scaffold,\n                         'constraint': constraint\n                         }\n\n            df_dict_list.append(temp_dict)\n\n    df = pd.DataFrame(df_dict_list)\n    df.to_csv('{0}-{1}.csv'.format(output_name_prefix, len(constraint_resnums)), index=False)\n\n    # Get non-redundant list of constraints\n    constraint_set = set()\n    for index, row in df.iterrows():\n        constraint_set.add(row['constraint'])\n\n    # Generate txt\n    with open('{0}-{1}.txt'.format(output_name_prefix, len(constraint_resnums)), 'w') as file:\n        for constraint in list(constraint_set):\n            file.write('{0}\\n'.format(constraint))\n\n    # Report\n    print('{0} unique constraints found in the current match set\\n'.format(len(constraint_set)))\n    print('Iteration inputs written to 
{0}\\n'.format(os.getcwd()))\n\n\ndef find_conformer_and_constraint_resnums(pdb_name, monomer=False):\n \"\"\"\n Generates name of ideal binding motif from match PDB name\n :param pdb_name:\n :return:\n \"\"\"\n pdb_split = re.split('_|-|\\.', pdb_name)\n\n ligand_name_index = 5\n conformer_id_index = 6\n\n if len(pdb_split[4]) == 4:\n ligand_name_index += 1\n conformer_id_index += 1\n\n ligand_name = pdb_split[ligand_name_index]\n conformer_id = pdb_split[conformer_id_index]\n conformer_name = '{}_{}'.format(ligand_name, conformer_id)\n\n constraint_resnum_block = re.split('-|\\.', pdb_name)[1][:-2]\n constraint_resnums = [int(a) for a in constraint_resnum_block.split('_') if a != ''][1:]\n\n return conformer_name, constraint_resnums\n\n\ndef create_unique_match_ID(match, monomer=False):\n pnc = re.split('_|-|\\.', match)\n return '{0}-{1}'.format(pnc[4], pnc[2])\n\n\ndef generate_posfile_things(ligand_ID, scaffold_file, monomer=False):\n \"\"\"\n Generate new posfiles for match iteration\n :return:\n \"\"\"\n\n # Inclulde residues with CA within 8A of ligand\n distance = 9\n\n # Get relevant positions for all matches\n position_dict = {}\n\n # New directory for posfiles to be generated\n posfile_path = os.path.join(os.getcwd(), 'posfiles')\n os.makedirs(posfile_path, exist_ok=True)\n\n # --- Iterate through matches to get match positions --- #\n # Bin posfiles by scaffold and previous match positions. I previously binned on scaffold only, but that blew up the\n # total number of match positions since a single scaffold could potentially accommodate several binding motifs in\n # many different configurations allowed by the inital match posfile.\n\n for match in os.listdir(final_match_dir):\n\n if match.endswith('.pdb'):\n unique_mathch_ID = create_unique_match_ID(match, monomer=monomer)\n position_dict[unique_mathch_ID] = set()\n\n for match in os.listdir(final_match_dir):\n if match.endswith('.pdb'):\n\n match_prody = prody.parsePDB(os.path.join(final_match_dir, match))\n designable_residues = match_prody.select('(calpha within {0} of resname {1}) and not resname PRO GLY'.format(distance, ligand_ID))\n posfile_list = [atom.getResnum() for atom in designable_residues]\n\n unique_mathch_ID = create_unique_match_ID(match, monomer=monomer)\n position_dict[unique_mathch_ID] = position_dict[unique_mathch_ID] | set(posfile_list)\n\n import pprint\n pprint.pprint(position_dict)\n\n # --- Create posfiles for relevant scaffolds --- #\n scaffold_file = open(scaffold_file)\n scaffold_list = [a.strip() for a in scaffold_file if a]\n\n relevant_scaffolds = [scaffold for scaffold in scaffold_list if scaffold[:4] in position_dict.keys()]\n for key, value in position_dict.items():\n posfile_name = '{}.pos'.format(key)\n with open(os.path.join(posfile_path, posfile_name), 'w') as posfile:\n posfile.write(' '.join([str(a) for a in value]))\n\n\nif __name__ == '__main__':\n args = docopt.docopt(__doc__)\n\n pymol_dir = args['']\n match_dir = args['']\n monomer_matches = args['--monomer']\n posfile = args['--posfile']\n ligand_ID = args['']\n scaffold_file = args['']\n\n # --- Generate match iteration inputs -- #\n final_match_dir = copy_matches(pymol_dir, match_dir)\n generate_iteration_inputs(final_match_dir, monomer=monomer_matches)\n\n # --- Generate new posfiles for iteration --- #\n if posfile:\n generate_posfile_things(ligand_ID, scaffold_file, 
monomer_matches)\n\n\n","repo_name":"jaaamessszzz/BindingSitesFromFragments-Utilities","sub_path":"Matching/prepare_match_iteration.py","file_name":"prepare_match_iteration.py","file_ext":"py","file_size_in_byte":7667,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"31038984833","text":"def occ_sort(s):\n mao = {}\n for i in s:\n if i not in mao:\n mao[i] = 1\n else:\n mao[i] += 1\n k = (sorted(mao.items(), key=lambda x: x[1], reverse=True))\n ans = ''\n for i in k:\n temp = ''\n temp += \"\".join([i[0] for _ in range(i[1])])\n ans += temp\n print(ans)\n # return \"\".join([i[0] for i in k])\n\n\nprint(occ_sort(\"bloomberg\"))\n","repo_name":"surajgholap/python-Misc","sub_path":"occurence_sort.py","file_name":"occurence_sort.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70520243529","text":"from flask import Blueprint, render_template, jsonify, request\nfrom .models import Report, ReportResult\nfrom app import db\nfrom sqlalchemy import func, text\n\n\nreport_page = Blueprint('report', __name__, template_folder='../templates')\n\n\ndef add_blacklist(tx_from, tx_to, fraud, curious):\n\tr = Report(\n\t\t\tsymbol='ETH',\n\t\t\ttx_from=tx_from,\n\t\t\ttx_to=tx_to,\n\t\t\tquantity=0, \n\t\t\tdescription='Blacklist',\n\t\t\tfraud=fraud,\n\t\t\tcurious=curious\n\t\t)\n\tdb.session.add(r)\n\tdb.session.commit()\n\n\ndef add_report_result(data):\n\tr = ReportResult(\n\t\t\ttx_to=data[0],\n\t\t\ttotal_fraud=data[1],\n\t\t\ttotal_curious=data[2],\n\t\t)\n\tdb.session.add(r)\n\tdb.session.commit()\n\n\n@report_page.route('/reports/new', methods=['GET'])\ndef new_report():\n\t# rows = eval(open('/app/src/app/report/blacklist.json', 'r').read())\n\t# for row in rows:\n\t# \tadd_blacklist('', row, 1, 0)\n\t# \tfor sub in rows[row]:\n\t# \t\tadd_blacklist(row, sub, 0, 1)\n\n\treturn render_template(\"new_report.html\")\n\n\n@report_page.route('/reports', methods=['GET'])\ndef report_list():\n\t# sql = text('SELECT tx_to, SUM(fraud), SUM(curious) FROM reports GROUP BY tx_to ORDER BY 1')\n\t# result = db.engine.execute(sql)\n\t# for row in result:\n\t# \tadd_report_result(row)\n\n\treturn jsonify([report_result.serialize() for report_result in ReportResult.query.all()])\n\n\n@report_page.route('/reports', methods=['POST'])\ndef post_report():\n \"\"\"\n Example: curl -d '{\"tx_from\":\"value1\", \"tx_to\":\"value2\", \"quantity\": 1, \"description\": \"I think this is fraud\", \"fraud\": 1, \"curious\": 0}' -H \"Content-Type: application/x-www-form-urlencoded\" -X POST http://localhost:8080/reports\n \"\"\"\n data = request.get_json(force=True)\n report = Report(\n symbol='ETH',\n tx_from=data['tx_from'],\n tx_to=data['tx_to'],\n quantity=data['quantity'], \n description=data['description'],\n fraud=data['fraud'],\n curious=data['curious']\n )\n db.session.add(report)\n db.session.commit()\n return 'Thanks for your report!', 201\n\n","repo_name":"DeactivatedWhatSoever/addsec","sub_path":"app/report/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13484534220","text":"#coding=utf-8\n\nimport os\nimport sys\nimport datetime\nimport collections\nimport util\n\nsys.path.append('..')\nimport utils.helper as helper\nimport utils.config as config\n\nimport pandas as 
pd\n\n\n#1,湖州,885,8,4,12,2,美食,休闲茶饮,饮品/甜点\n\ndef loadWeatherData():\n res = collections.defaultdict(dict)\n with open(os.path.join(config.DATASET,'weather_all.csv')) as fin:\n for line in fin:\n tmp = line.strip().split(\",\")\n city = tmp[0]\n date = datetime.datetime.strptime(tmp[1],'%Y-%m-%d')\n max_tem = int(tmp[2])\n min_tem = int(tmp[3])\n we = tmp[4]\n wind = tmp[-1]\n res[city][date] = [max_tem,min_tem,we,wind] \n return res\n\ndef getFeature(tmp):\n #weather_list = ['雨','小雨','中雨','大雨','暴雨','雪','暴雪','雷','雾','霾']\n weather_dict = {\n '雨':1,\n '小雨':1,\n '中雨':2,\n '大雨':3,\n '暴雨':4,\n '雪':1,\n '暴雪':4,\n '雷':3,\n '雾':1,\n '霾':2\n }\n res = [tmp[0],tmp[1]]\n w_f = 0\n for w in weather_dict:\n if w in tmp[2]:\n w_f = max(w_f,weather_dict[w])\n\n #w_f = [0] * len(weather_list)\n #for i in xrange(len(weather_list)):\n # if weather_list[i] in tmp[2]:\n # w_f[i] = 1\n\n wind_f = 0\n if '5' in tmp[3] or '6' in tmp[3]:\n wind_f = 3\n elif '4' in tmp[3] or ('3' in tmp[3] and '小于3' not in tmp[3]):\n wind_f = 2\n else:\n wind_f = 1\n res.append(w_f)\n\n res.append(wind_f)\n\n msg = ['max_tem','min_tem'] + ['weather'] + ['wind']\n return res,msg\n\ndef getShopFeatures(gap):\n weatherdata = loadWeatherData()\n shop_info = helper.loadShopInfo()\n\n flag = True\n with open(helper.get_feature_path(gap,'weather_feature'),'w') as fout:\n for shop in xrange(1,2001):\n shop = str(shop)\n city = shop_info[shop][0]\n for date in util.getDate(start = config.LABELSTARTDATE):\n feature,msg = getFeature(weatherdata[city][date])\n if flag:\n fout.write(\"%s\\n\"%(\",\".join(msg)))\n flag = False\n tmp = \",\".join(map(str,feature) )\n fout.write(\"%s,%s,%s\\n\"%(tmp,shop,date.strftime(\"%Y-%m-%d\")))\n\ndef run(gap):\n getShopFeatures(gap)\n\n\nif __name__ == '__main__':\n run(sys.argv[1])\n","repo_name":"upperli/ijcai2017","sub_path":"feature/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"23833383960","text":"\"\"\"\nLoad model with custom model\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport load_data\n\n\nclass ResidualBlock(keras.layers.Layer):\n def __init__(self, n_layers, n_neurons, **kwargs):\n super().__init__(**kwargs)\n self.hidden = [keras.layers.Dense(n_neurons, activation=\"elu\",\n kernel_initializer=\"he_normal\")\n for _ in range(n_layers)]\n self.n_layers = n_layers\n self.n_neurons = n_neurons\n\n def call(self, inputs):\n Z = inputs\n for layer in self.hidden:\n Z = layer(Z)\n return inputs + Z # to keep this correct, make sure `n_neurons` and `inputs` have the same dimentsion\n\n def get_config(self):\n \"\"\" implement this if want to save model \"\"\"\n base_config = super().get_config()\n return {**base_config, \"n_layers\": self.n_layers, \"n_neurons\": self.n_neurons}\n\n\ndef main():\n X_train_scaled, X_valid_scaled, X_test_scaled, y_train, y_valid, y_test = load_data.load_california_housing()\n\n model = keras.models.load_model(\"./saved_model/A5_custom_model.h5\", custom_objects={\"ResidualBlock\": ResidualBlock})\n\n history = model.fit(X_train_scaled, y_train, epochs=2)\n model.evaluate(X_test_scaled, y_test)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"liCCcccs/my_HandsOnML_note","sub_path":"Chapter12/A5_load_model.py","file_name":"A5_load_model.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"18834408917","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nfrom collections import defaultdict\n\ndic = defaultdict(list)\nlis=[]\nn,m=map(int,input().split())\nfor i in range(n):\n t=input()\n dic[t].append(i+1)\nfor i in range(m):\n lis.append(input())\nfor i in range(len(lis)):\n\n o=dic[lis[i]]\n if len(o)==0:\n print(-1)\n else:\n for k in range(len(dic[lis[i]])):\n print(dic[lis[i]][k],end=\" \")\n print(\"\")\n\n\n\n","repo_name":"guptaShantanu/Python-Programs","sub_path":"default_dic.py","file_name":"default_dic.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11686723837","text":"def isPalindrom(inputStr):\n if(inputStr == inputStr[::-1]): \n return True \n else: \n return False\n\ndef main():\n print(\"This program determines whether or not it is a palindrome.\")\n curStr = input(\"Please enter a five-digit integer: \")\n if(len(curStr) != 5 or not curStr.isdigit()):\n print(\"Invalid input!\")\n else:\n if(isPalindrom(curStr)):\n print(\"The 5-digit integer is palindrome.\")\n else:\n print(\"The 5-digit integer is not palindrome.\")\n\nif __name__ == \"__main__\":\n main()\n ","repo_name":"wangziming0915/CS500","sub_path":"lect01/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39550443329","text":"#!/usr/bin/env python3\n\nimport pygame\n\n# Initialize pygame library\npygame.init()\n\n# Create the screen\nscreen = pygame.display.set_mode((800, 600))\n\n# Title Icon and name\npygame.display.set_caption(\"Space Invaders\")\nicon = pygame.image.load('./src/ufo.png')\npygame.display.set_icon(icon)\n\n# Player\nplayerImg = pygame.image.load('./src/player.png')\nplayerX = 370\nplayerY = 480\n\n\ndef player(x, y):\n screen.blit(playerImg, (x, y))\n\n\nrunning = True\n\n# Game Loop\nwhile running:\n # Look through all events in the game\n for event in pygame.event.get():\n # If the quit button is pressed stop the game\n if event.type == pygame.QUIT:\n running = False\n \n # If keystroke is pressed check whether its right or left\n if event.type == pygame.KEYDOWN:\n print(\"Some key is pressed\")\n if event.key == pygame.K_LEFT:\n print(\"Left arrow is pressed\")\n if event.key == pygame.K_RIGHT:\n print(\"Right arrow is pressed\")\n if event.type == pygame.KEYUP:\n print(\"Some key is released\")\n if event.key == pygame.K_LEFT:\n print(\"Left arrow is released\")\n if event.key == pygame.K_RIGHT:\n print(\"Right arrow is released\")\n # Have to redraw background on every frame\n screen.fill((0, 0, 0))\n player(playerX, playerY)\n pygame.display.update()\n","repo_name":"LSW-Programming-Club/examples","sub_path":"Python/9_29/Concepts/22_input.py","file_name":"22_input.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40587693782","text":"# Created by Qixun Qu\n# quqixun@gmail.com\n# 2017/05/07\n\n# This script is to test a new slice\n# with the trained model.\n\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\nfrom train_model import TrainModel\nfrom sklearn.metrics import confusion_matrix\nfrom matplotlib import pyplot as plt\n\n\nclass TestModel():\n\n def __init__(self, data):\n '''__INIT__\n\n Initialization of instance.\n\n '''\n\n self.data = data.test # Test data\n self.dims = 
data.dims # Dimensions of test data\n self.fn = data.fn # The number of features\n self.cn = data.cn # The number of classes\n\n self.pred = [] # Predicted labels\n self.true = [] # Real labels\n\n return\n\n def similar_coefficient(self, y_true, y_pred, method='di'):\n '''SIMILAR_COEFFICIENT\n\n Evaluate similarity between predicted labels and\n real labels. This function provides two method:\n dice index and jaccard index.\n\n '''\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n\n sc = np.zeros(self.cn)\n mt_str = ''\n\n # Calculate similar coefficient\n for i in range(self.cn):\n if method == 'di':\n sc[i] = 2 * cm[i, i] / (np.sum(cm[i, :] + cm[:, i]))\n mt_str = 'Dice Index'\n elif method == 'ji':\n sc[i] = cm[i, i] / ((np.sum(cm[i, :] + cm[:, i])) - cm[i, i])\n mt_str = 'Jaccard Index'\n else:\n print(\"Wrong method!\")\n return None\n\n # Print the result\n print(\"\\n\" + mt_str + \":\")\n print(\"CSF({0:.4f}) GM({1:.4f}) WM({2:.4f})\\n\"\n .format(sc[0], sc[1], sc[2]))\n\n return sc\n\n def test_model(self, model='model.npz'):\n '''TEST_MODEL\n\n Segment a new brain slice with the trained model,\n getting predicted labels and calculate similar coefficient.\n\n '''\n\n if self.data.shape[0] == 0:\n print(\"Test slice - This slice has no CSF, GM and WM.\")\n return\n\n # Computation graph\n data_num = self.data.shape[0]\n x = tf.placeholder(tf.float32, shape=[data_num, self.fn])\n\n # Obtain the result form the model\n net = TrainModel().build_network(x)\n y_out = net.outputs\n\n # Obtain the predicted labels\n pred = tf.reshape(tf.argmax(y_out, 1) + 1, shape=[data_num])\n\n # Assign model's weights with the saved parameters\n sess = tf.Session()\n params = tl.files.load_npz(name=model)\n tl.files.assign_params(sess, params, net)\n\n y_pred = sess.run(pred, feed_dict={x: self.data[:, 1:-1]})\n y_pred = y_pred.reshape((-1, 1))\n y_true = self.data[:, -1].reshape((-1, 1)).astype(int)\n\n # Calculate similar coefficient\n self.similar_coefficient(y_true, y_pred, 'di')\n self.similar_coefficient(y_true, y_pred, 'ji')\n\n sess.close()\n\n self.pred = y_pred\n self.true = y_true\n\n return\n\n def compare_slice(self):\n '''COMPARE_SLICES\n\n Plot real slice, predicted slice and the wrong cases\n in the slice.\n\n '''\n\n if self.data.shape[0] == 0:\n print(\"Plot slice - This slice has no CSF, GM and WM.\")\n return\n\n v_shape = self.dims[1:3]\n pred_v = np.zeros(v_shape).reshape((-1, 1))\n true_v = np.zeros(v_shape).reshape((-1, 1))\n\n idx = self.data[:, 0].astype(int)\n pred_v[np.array(idx)] = self.pred\n true_v[np.array(idx)] = self.true\n\n pred_v = pred_v.reshape(v_shape)\n true_v = true_v.reshape(v_shape)\n\n # Extract errors and determine their locations in slice\n errors = np.where(pred_v != true_v)\n x, y = errors[0], errors[1]\n\n plt.figure()\n # Plot real slice\n plt.subplot(1, 3, 1)\n plt.imshow(true_v, cmap='gray')\n plt.axis('off')\n plt.title('Ground Truth', fontsize=22)\n # Plot predicted slice\n plt.subplot(1, 3, 2)\n plt.imshow(pred_v, cmap='gray')\n plt.axis('off')\n plt.title('Prediction', fontsize=22)\n # Plot error\n plt.subplot(1, 3, 3)\n plt.imshow(true_v, cmap='gray')\n plt.scatter(y, x, s=3, c='r')\n plt.axis('off')\n plt.title('Errors ({0}/{1})'.format(len(x), len(self.pred)),\n fontsize=22)\n\n # Display the plot in full screen\n fig = plt.get_current_fig_manager()\n fig.window.showMaximized()\n plt.show()\n\n 
return\n","repo_name":"quqixun/SegBrain","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"25583887917","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom .forms import NewRegistrationForm\n\n\n# Create your views here.\ndef register(request):\n\tif request.method == \"GET\":\n\t\tform = NewRegistrationForm()\n\t\treturn render(request, \"users/register.html\", {\"form\" : form})\n\n\telse:\n\t\tform = NewRegistrationForm(request.POST)\n\t\tif form.is_valid():\n\t\t\t\n\t\t\tuser = form.save()\n\t\t\tuser.first_name = request.POST[\"firstName\"]\n\t\t\tuser.last_name = request.POST[\"lastName\"]\n\t\t\tuser.save()\n\t\t\tusername = form.cleaned_data.get(\"username\")\n\t\t\tmessages.success(request, f\"Account created for {username}!\")\n\t\t\treturn redirect(\"login\")\n\t\telse:\n\t\t\treturn render(request, \"users/register.html\", {\"form\" : form})","repo_name":"LeydenJar/project3","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13401367490","text":"# Given a square matrix mat, return the sum of the matrix diagonals.\n#\n# Only include the sum of all the elements on the primary diagonal and all the elements on the secondary\n# diagonal that are not part of the primary diagona\nclass Solution:\n def diagonalSum(self, mat: list[list[int]]) -> int:\n\n xLen = len(mat)\n\n for i in mat:\n yLen = len(i)\n if yLen != xLen:\n return 0\n\n x1 = 0\n x2 = yLen - 1\n y = 0\n sum = 0\n\n while y < xLen:\n sum += mat[y][x1]\n if x1 != x2:\n sum += mat[y][x2]\n # print(\"mat[y][x1]:\", mat[y][x1])\n # print(\"mat[y][x2]:\", mat[y][x2])\n #\n # print(\"x1: \", x1)\n # print(\"x2: \", x2)\n # print(\"y: \", y)\n\n print(\"\\n\")\n x1 += 1\n x2 -= 1\n y += 1\n\n return (sum)\n\n def diagonalSum2(self, mat: list[list[int]]) -> int:\n ans = 0\n # mat_len_even = len(mat)//2\n # mat_len_odd = len(mat)%2\n for i in range(len(mat) // 2):\n ans += sum([mat[i][i], mat[i][-1 - i], mat[-1 - i][i], mat[-1 - i][-1 - i]])\n if len(mat) % 2 != 0:\n ans += mat[len(mat) // 2][len(mat) // 2]\n return ans\n\n def diagonalSum3(self, mat: list[list[int]]) -> int:\n\n xLen = len(mat)\n\n for i in mat:\n yLen = len(i)\n if yLen != xLen:\n return 0\n\n x1 = 0\n x2 = xLen - 1\n sum = 0\n\n for y in mat:\n sum += y[x1]\n if x1 != x2:\n sum += y[x2]\n\n # print(\"y[x1]:\", y[x1])\n # print(\"y[x2]\",y[x2])\n\n x1 += 1\n x2 -= 1\n\n return (sum)\n\n\nWynik = Solution()\nprint(\"Wynik: \",Wynik.diagonalSum([ [1,2,3],\n [4,5,6],\n [7,8,9]]))\nprint(\"Wynik: \",Wynik.diagonalSum2([[1,2,3],\n [4,5,6],\n [7,8,9]]))\nprint(\"Wynik: \",Wynik.diagonalSum3([[1,2,3],\n [4,5,6],\n [7,8,9]]))","repo_name":"Polakiewicz1991/python","sub_path":"LeetCode/Other/1572. Matrix Diagonal Sum.py","file_name":"1572. 
Matrix Diagonal Sum.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"551353255","text":"import torch\nimport argparse\nfrom utils import *\nfrom train_distributed import get_dataset\nimport os\nfrom collections import defaultdict\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_filenames', nargs='+', default=[f\"db_SGD_u3.png\"], help=\"model file names\")\nparser.add_argument('--num_samples', default=16, type=int, help=\"number of samples\")\nparser.add_argument('--shorten', type=int, help=\"shorten the number of models used for evaluation\")\nparser.add_argument('--normalize', action=\"store_true\", help=\"normalize matrices\")\nparser.add_argument('--make_permutation_invariant', action=\"store_true\", help=\"make the matrices permutation invariant\")\nparser.add_argument('--bin_count', type=int, default=10, help=\"number of bins to separate training loss\")\nparser.add_argument('--output_folder', default='output')\nparser.add_argument('--visualize_db', action='store_true', help=\"visualize decsion boundaries\")\nparser.add_argument('--testing_seed', type=int, default=100, help=\"seed for testing dataset\")\nparser.add_argument('--testing_count', type=int, default=16, help=\"number of examples for testing\")\nparser.add_argument('--worst_case', action='store_true', help=\"worst case test accuracy models\")\nparser.add_argument('--lower_loss', type=float, help=\"lower upper bound for plotting\", default=0)\nparser.add_argument('--suffix', default='')\n\nargs = parser.parse_args()\n\nmodels_list = []\noptimizers = []\nvalid_model_filenames = []\n\nfor model_filename in args.model_filenames:\n try:\n models_dict = torch.load(model_filename)\n except:\n continue\n valid_model_filenames.append(model_filename)\n if models_dict['config']['model.arch'] == \"lenet\":\n if 'feature_base_dim' in models_dict['kwargs']:\n models_dict['kwargs']['feature_dim'] = models_dict['kwargs']['feature_base_dim']\n del models_dict['kwargs']['feature_base_dim']\n models = LeNetModels(**models_dict[\"kwargs\"])\n elif models_dict['config']['model.arch'] == \"linear\":\n models = LinearModels(**models_dict[\"kwargs\"], device=torch.device(\"cpu\"))\n else:\n models = MLPModels(**models_dict[\"kwargs\"], device=torch.device(\"cpu\"))\n hidden_units = models_dict[\"kwargs\"][\"hidden_units\"]\n models.load_state_dict(models_dict[\"good_models_state_dict\"])\n if args.shorten:\n models = models.shorten(args.shorten)\n model_count = models.model_count\n model_configs_dict = models_dict[\"config\"]\n model_configs_dict = defaultdict(lambda : None, model_configs_dict)\n optimizer = model_configs_dict['optimizer.name']\n optimizers.append(optimizer)\n if args.make_permutation_invariant:\n models.make_permutation_invariant()\n if args.normalize:\n models.normalize()\n models_list.append(models)\n\n\ntrain_data, train_labels, test_data, test_labels, test_complete_data, test_complete_labels = get_dataset(\n name = model_configs_dict['dataset.name'],\n num_samples = model_configs_dict['dataset.num_samples'],\n seed = model_configs_dict['dataset.seed'],\n num_classes = model_configs_dict['dataset.mnistcifar.num_classes'],\n noise = model_configs_dict['dataset.kink.noise'],\n margin= model_configs_dict['dataset.kink.margin'],\n)\n\n_, _, test_data, test_labels, test_complete_data, test_complete_labels = get_dataset(\n name = model_configs_dict['dataset.name'],\n num_samples = 100,\n seed = 
model_configs_dict['dataset.seed'],\n num_classes = model_configs_dict['dataset.mnistcifar.num_classes'],\n noise = model_configs_dict['dataset.kink.noise'],\n margin= model_configs_dict['dataset.kink.margin'],\n)\ntrain_data, train_labels, test_data, test_labels, test_complete_data, test_complete_labels = train_data.cpu(), train_labels.cpu(), test_data.cpu(), test_labels.cpu(), test_complete_data.cpu(), test_complete_labels.cpu()\n\nloss_func = nn.CrossEntropyLoss(reduction='none')\ntrain_accs_list = []\ntest_accs_list = []\ntrain_losses_list = []\ntest_losses_list = []\n# TODO: separate evaluation with visualization code\n\n\nfor models_i, models in enumerate(models_list):\n if True:\n # get average margins\n model = models.get_model_subsets([0])\n pred = model(train_data).squeeze(1)\n top2logits = pred.topk(k=2, dim=1).values\n avgmargin = (top2logits[:, 0] - top2logits[:, 1]).mean()\n print(f\"average margin is : {avgmargin}\")\n\n with torch.no_grad():\n train_losses, train_accs = calculate_loss_acc(train_data, train_labels, models, loss_func, batch_size=1)\n test_losses, test_accs = calculate_loss_acc(test_complete_data, test_complete_labels, models, loss_func, batch_size=1)\n train_accs_list.append(train_accs)\n test_accs_list.append(test_accs)\n train_losses_list.append(train_losses)\n test_losses_list.append(test_losses)\n print(f\"model_filename: {valid_model_filenames[models_i]}\")\n print(f\"models: {models_i}\")\n print(f\"model_count: {models.model_count}\")\n print(f\"training acc: {train_accs.mean()}\")\n print(f\"overall test acc:{test_accs.mean(), test_accs.std()}\")\n print(\"minimum and max train losses\", train_losses.min().item(), train_losses.max().item())\n\n\n\nprint(\"by train loss\", \"-\"*20)\nintervals = torch.linspace(\n min([train_losses.min().item() for train_losses in train_losses_list]),\n max([train_losses.max().item() for train_losses in train_losses_list]),\n args.bin_count+1)\n\nfor models_i in range(len(models_list)):\n subset_models_list = []\n train_losses = train_losses_list[models_i]\n test_losses = test_losses_list[models_i]\n test_accs = test_accs_list[models_i]\n print(\"=\" * 20, f\"model_filename: {valid_model_filenames[models_i]}\")\n for i, (l, u) in enumerate(zip(intervals[:-1], intervals[1:])):\n idx = (train_losses >= l) & (train_losses <= u)\n if idx.sum()> 0:\n print(f\"bin{i}-interval:({l.item(): 0.3f},{u.item(): 0.3f}) count:{idx.sum().item(): 4.0f},\"\n f\"test accs (mean,min,max): \"\n f\"{test_accs[idx].mean().cpu().item(): 0.3f}\"\n f\",{test_accs[idx].min().cpu().item(): 0.3f},\"\n f\"{test_accs[idx].max().cpu().item(): 0.3f}, \"\n f\"test loss: {test_losses[idx].mean().cpu().item(): 0.3f}\")\n\nif args.visualize_db:\n subset_models_list = []\n for models_i, models in enumerate(models_list):\n train_losses = train_losses_list[models_i]\n test_losses = test_losses_list[models_i]\n test_accs = test_accs_list[models_i]\n for i, (l,u) in enumerate(zip(intervals[:-1], intervals[1:])):\n idx = (train_losses >= l) & (train_losses <= u - args.lower_loss)\n if (idx == 1).sum() == 0:\n continue\n print(f\"interval: {l.item(): 0.3f}, {u.item(): 0.3f}, count:{idx.sum().item()} \"\n f\"test accs (mean, min, max): {test_accs[idx].mean().cpu().item(): 0.3f} \"\n f\",{test_accs[idx].min().cpu().item(): 0.3f},{test_accs[idx].max().cpu().item(): 0.3f}\"\n f\"test loss: {test_losses[idx].mean().cpu().item(): 0.3f}\")\n\n subset_idx = torch.nonzero(idx).squeeze(1)\n if args.worst_case:\n subset_idx = (-test_accs).topk(9).indices\n print(i, 
subset_idx)\n if len(subset_idx) >= 9:\n subset_idx = subset_idx[:min(9, len(subset_idx))]\n subset_models = models.get_model_subsets(subset_idx)\n subset_models_list.append(subset_models)\n else:\n subset_models_list.append(models.get_model_subsets([0]*9))\n \n os.makedirs(args.output_folder, exist_ok=True)\n filename = f'db_{optimizer}_u{hidden_units}_s{model_configs_dict[\"dataset.num_samples\"]}_seed{model_configs_dict[\"dataset.seed\"]}_noise{model_configs_dict[\"dataset.kink.noise\"]}_{args.suffix}.png'\n filename = os.path.join(args.output_folder, filename)\n print(\"saving the file at \", filename)\n visualize_decision_boundary(subset_models_list,\n data=(train_data, train_labels),\n filename=filename)\n","repo_name":"Ping-C/optimizer","sub_path":"evaluate_minimas.py","file_name":"evaluate_minimas.py","file_ext":"py","file_size_in_byte":8096,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"16"} +{"seq_id":"7739401787","text":"import os\nfrom configparser import ConfigParser\nfrom typing import Optional\n\n\nclass Config(object):\n __DEFAULTS__ = {\n \"package\": \"packages.txt\",\n \"directory\": \".mbpkg\",\n \"jungle\": \"barrels.jungle\",\n \"manifest\": \"manifest.xml\",\n }\n\n def __init__(self, args):\n self.__args = args\n self.__config_file = None\n if args.config is not None:\n config_path = args.config\n else:\n config_path = \"mbgetcfg.ini\"\n\n if os.path.exists(config_path):\n config = ConfigParser()\n with open(config_path, \"r\") as f:\n data = f.read()\n\n config.read_string(data)\n\n if \"mbget\" in config:\n self.__config_file = config[\"mbget\"]\n\n self.__cached_vals = {}\n\n if args.token is not None:\n self.__token = args.token\n elif \"MBGET_GH_TOKEN\" in os.environ:\n self.__token = os.environ[\"MBGET_GH_TOKEN\"]\n else:\n self.__token = None\n\n @property\n def jungle(self) -> str:\n return self.__get_cached_config(\"jungle\")\n\n @property\n def package(self) -> str:\n return self.__get_cached_config(\"package\")\n\n @property\n def barrel_dir(self) -> str:\n return self.__get_cached_config(\"directory\")\n\n @property\n def token(self) -> Optional[str]:\n return self.__token\n\n @property\n def manifest(self) -> str:\n return self.__get_cached_config(\"manifest\")\n\n def prepare_project_dir(self) -> None:\n \"\"\"\n Put the project dir into a state where mbget can assume that all output\n locations are valid\n :return:\n \"\"\"\n self.__build_output_dir()\n\n def __build_output_dir(self):\n \"\"\"\n Builds the output dir if its required.\n\n :return:\n \"\"\"\n if not os.path.exists(self.barrel_dir):\n os.mkdir(self.barrel_dir)\n\n def __get_cached_config(self, param) -> str:\n if param not in self.__cached_vals:\n self.__add_config_to_cache(param)\n\n return self.__cached_vals[param]\n\n def __add_config_to_cache(self, param):\n if hasattr(self.__args, param) and getattr(self.__args, param) is not None:\n val = getattr(self.__args, param)\n elif self.__config_file is not None and param in self.__config_file:\n val = self.__config_file[param]\n else:\n val = self.__DEFAULTS__[param]\n\n self.__cached_vals[param] = val\n\n @staticmethod\n def open_file(name, mode, callback) -> None:\n with open(name, mode) as f:\n callback(f)\n","repo_name":"gcaufield/MonkeyPack","sub_path":"mbget/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17071956070","text":"#!/usr/bin/env 
python3\n\nimport json\nimport re\nimport os.path\nimport sys\nfrom collections import OrderedDict\n\ndef row_to_md(row):\n md = \"\"\n if row.get(\"link\",\"\") != \"\":\n md += \"[{}]({})\".format(row[\"name\"], row[\"link\"])\n # for key, value in row:\n # if key in [\"name\", \"link\", \"id\", \"_xml\", \"_links\"]:\n # continue\n # md += '{}'.format(value)\n if \"text\" in row:\n md += row[\"text\"]\n md += \"\\n\\n\"\n return md\n\n\ninfile = sys.argv[1]\nrows = []\n\nprint(\"reading\", infile)\nwith open(infile) as f:\n rows = json.load(f)[\"rows\"]\n print(len(rows), \"rows\")\n\nsections = OrderedDict()\nbody = \"\"\nitems = []\n\nfor row in rows:\n del row[\"id\"]\n del row[\"_xml\"]\n del row[\"_links\"]\n del row[\"title\"]\n\n # if row['projecttitle'] == '':\n # body += row['summary'] + \"\\n\\n\"\n # continue\n\n\n # if row['title'] == '':\n # body += row['fulltext'] + \"\\n\\n\"\n # continue\n\n section = row.get(\"section\", \"\")\n if section == '' and 'text' in row:\n body += row_to_md(row)\n continue\n\n del row[\"section\"]\n if not section in sections:\n print(\"creating section\", section)\n sections[section] = []\n\n sections[section].append(row)\n\n items.append(row)\n\nfor title, section in sections.items():\n if section[0].get(\"link\", \"\") != \"\":\n body += \"### {}\\n\\n\".format(title)\n for item in section:\n body += row_to_md(item)\n\ndata = {\n \"title\": infile.replace(\".json\", \"\").replace(\"-\", \" \").title(),\n \"path\": infile.replace(\".json\", \"\"),\n \"sections\": sections\n # \"items\": items\n}\noutfile = infile.replace(\".json\", \".md\")\nwith open(outfile, \"w\") as of:\n of.write(\"---\\n\")\n for key, value in data.items():\n print(\"writing section\", key)\n of.write(key)\n of.write(\": \")\n of.write(json.dumps(value))\n of.write(\"\\n\")\n of.write(\"---\\n\")\n of.write(body)\n","repo_name":"uts-magic-lab/www.themagiclab.org","sub_path":"convert_to_frontmatter.py","file_name":"convert_to_frontmatter.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23844975070","text":"from django.urls import path, re_path\nfrom free import views\n\n\napp_name = 'free'\nurlpatterns = [\n\n #Example: /blog/\n path('',views.PostLV.as_view(), name= 'index'),\n\n #/blog/tag/\n path('tag/', views.TagCloudTV.as_view(), name='tag_cloud'),\n\n #/blog/tag/tagname/\n path('tag//', views.TaggedObjectLV.as_view(), name='tagged_object_list'),\n \n #Example: /blog/post/ (same as /blog/)\n path('post/', views.PostLV.as_view(), name='post_list'),\n\n #/blog/post/django-example/\n path('post/', views.PostDV.as_view(), name='post_detail'),\n\n #/blog/archive/\n path('archive/', views.PostAV.as_view(), name='post_archive'),\n\n #/blog/archive/2019/\n path('archive//', views.PostYAV.as_view(), name='post_year_archive'),\n\n #/blog/archive/2019/nov\n path('archive///', views.PostMAV.as_view(), name='post_month_archive'),\n\n #/blog/archive/2019/nov/10/\n path('archive//,//', views.PostDAV.as_view(), name='post_day_archive'),\n\n #/blog/archive/today/\n path('archive/today/', views.PostTAV.as_view(), name='post_today_archive'),\n\n path('search/', views.SearchFormView.as_view(), name='search',),\n path('add/',views.PostCreateView.as_view(), name=\"add\",),\n path('change/', views.PostChangeLV.as_view(), name=\"change\",),\n \n path('update//', views.PostUpdateView.as_view(), name=\"update\",),\n path('/delete/', views.PostDeleteView.as_view(), name=\"delete\",),\n 
path('comment//delete/', views.CommentDeleteView.as_view(), name=\"comment_delete\",),\n path('blogpost-like/', views.PostLike, name=\"blogpost_like\"), \n\n] ","repo_name":"alstjr0307/Web_StockStorage","sub_path":"free/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5431411280","text":"# A Budget class which instantiates objects based on different budget categories\nclass Budget:\n def __init__(self, food, clothing, entertainment):\n self.food = food\n self.cloth = clothing\n self.entertain = entertainment\n\n # menu method - a user friendly dashboard for transactions on the budget app\n def menu(self):\n menu_choice = int(input(\"W E L C O M E ! \\n Please pick an option \\n 1.) Deposit \\n 2.) Withdrawal \\n 3.) \"\n \"Check Category Balances \\n 4.) Transfer \\n\"))\n\n if menu_choice == 1:\n self.deposit()\n elif menu_choice == 2:\n self.withdraw()\n elif menu_choice == 3:\n self.balance()\n elif menu_choice == 4:\n self.transfer()\n else:\n print(\"Wrong input, Try again\")\n self.menu()\n\n # Deposit method - to add cash into a budget category\n def deposit(self):\n deposit_amount = int(input(\"W E L C O M E ! \\n How Much would you like to deposit? \\n \"))\n deposit_choice = int(input('''\n Which category would you like to deposit to? \\n \n 1.) for food \\n \n 2.) for clothing \\n \n 3.) for entertainment \\n\n 4.) exit \\n\n '''))\n\n if deposit_choice == 1:\n self.food += deposit_amount\n elif deposit_choice == 2:\n self.cloth += deposit_amount\n elif deposit_choice == 3:\n self.entertain += deposit_amount\n elif deposit_choice == 4:\n self.menu()\n else:\n print(\"Wrong input, Try again\")\n self.deposit()\n print(\"Transaction Successful!\")\n self.balance()\n\n # withdraw method - to remove cash from a budget category\n def withdraw(self):\n withdrawal_amount = int(input(\"W E L C O M E ! \\n How Much would you like to withdraw? \\n \"))\n withdrawal_source = int(input(''' \n Which category would you like to withdraw from? \\n \n 1.) for food \\n \n 2.) for clothing \\n \n 3.) for entertainment \\n\n 4.) exit \\n\n '''))\n\n if withdrawal_source == 1:\n self.food -= withdrawal_amount\n elif withdrawal_source == 2:\n self.cloth -= withdrawal_amount\n elif withdrawal_source == 3:\n self.entertain -= withdrawal_amount\n elif withdrawal_source == 4:\n self.menu()\n else:\n print(\"Wrong input, Try again\")\n self.withdraw()\n self.balance()\n\n # balance method - to compute balances on all budget categories\n def balance(self):\n print(\"Your Balances are...\")\n print(f\"Food --> {self.food} \\n Clothing --> {self.cloth} \\n Entertainment --> {self.entertain}\")\n self.proceed()\n\n # to perform another transaction\n def proceed(self):\n to_continue = input('Would you like to perform another function (y/n)... ')\n if to_continue == 'y':\n self.menu()\n elif to_continue == 'n':\n print('Good Bye!')\n exit()\n else:\n print('Wrong input!\\n Enter (y/n)')\n\n # Transfer method - to transfer balance between budget categories\n def transfer(self):\n transfer_from = int(input(\"Category to transfer from ... \\n 1 Food \\n 2 Cloth \\n 3 Entertainment \\n\"))\n transfer_amount = int(input(\"Enter Amount to transfer\"))\n transfer_to = int(input(\"Category to transfer To ... 
\\n 1 Food \\n 2 Cloth \\n 3 Entertainment \\n\"))\n\n if transfer_from == 1:\n self.food -= transfer_amount\n if transfer_to == 2:\n self.cloth += transfer_amount\n elif transfer_to == 3:\n self.entertain += transfer_amount\n else:\n self.transfer()\n print(f\"You have Transferred {transfer_amount} successfully\")\n self.balance()\n\n elif transfer_from == 2:\n self.cloth -= transfer_amount\n if transfer_to == 1:\n self.food += transfer_amount\n elif transfer_to == 3:\n self.entertain += transfer_amount\n else:\n self.transfer()\n print(f\"You have Transferred {transfer_amount} successfully\")\n self.balance()\n\n elif transfer_from == 3:\n self.entertain -= transfer_amount\n if transfer_to == 2:\n self.cloth += transfer_amount\n elif transfer_to == 1:\n self.food += transfer_amount\n else:\n self.transfer()\n print(f\"You have Transferred {transfer_amount} successfully\")\n self.balance()\n\n else:\n print(\"Wrong Input\")\n self.transfer()\n\n\n# instantiating the class with amounts for categories food, cloth and entertainment\nweekend_shopping = Budget(500, 4000, 3000)\n\nweekend_shopping.menu()\n","repo_name":"siegfred14/budgetapp","sub_path":"budgetapp.py","file_name":"budgetapp.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70104592327","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, tools, _\nfrom odoo.exceptions import UserError\n\n\nclass PMSEquipment(models.Model):\n _name = 'pms.equipment'\n _inherit = ['portal.mixin', 'mail.thread', 'mail.activity.mixin']\n _description = \"Equipments\"\n\n def _get_name(self):\n return self.name\n\n def _get_property(self):\n return self.env.user.current_property_id\n\n equipment_type_id = fields.Many2one(\"pms.equipment.type\",\n string=\"Equipment Type\",\n track_visibility=True,\n required=True)\n name = fields.Char(\"Serial No\",\n required=True,\n track_visibility=True,\n help='Serial No of Equipment')\n model = fields.Char(\"Model\",\n required=True,\n track_visibility=True,\n help='Model of Equipment.')\n manufacturer = fields.Char(\"Manufacturer\", track_visibility=True)\n ref_code = fields.Char(\"Reference Code\", track_visibility=True)\n property_id = fields.Many2one(\"pms.properties\",\n \"Property\",\n default=_get_property,\n required=True,\n track_visibility=True)\n digit = fields.Integer(\n \"Digit\",\n track_visibility=True,\n help='The maximun capicity to display on equipment screen(esp. 
meter)')\n count_facility = fields.Integer(\"Count Unit\",\n compute=\"_get_count_facility\")\n roll_over_type = fields.Selection(\n [('DIGITROLLOVER', 'Digit RollOver'),\n ('UNITROLLOVER', 'Unit RollOver'),\n ('CURRENTROLLOVER', 'Current RollOver')],\n \"Rollover Type\",\n help='Which method will be use if equipment roll over.')\n utilities_type = fields.Many2one(\"pms.utilities.type\", \"Utiliteis Type\")\n utilities_code = fields.Char(\"Utiliteis Code\",\n related=\"utilities_type.code\")\n current_unit_type = fields.Selection([('watt', \"Watts\"),\n ('kilowatt', \"Kilowatts\"),\n ('megawatt', \"Megawatts\")],\n string=\"Current Unit Type\")\n meter_type = fields.Selection([('normal', 'Normal'),\n ('share-meter', 'Share Meter')],\n string=\"Meter Type\")\n power_system = fields.Selection([('single-phase', 'Single Phase'),\n ('three-phase', 'Three Phase')],\n string=\"Power System\")\n meter_template_id = fields.Many2one(\"pms.meter.template\",\n \"Meter Templates\")\n equipment_line = fields.One2many(\"pms.equipment.line\",\n \"equipment_id\",\n string=\"Equipment Lines\")\n\n @api.multi\n def _get_count_facility(self):\n count = 0\n unit_ids = self.env['pms.facilities'].search([('utilities_no', '=',\n self.id),\n ('inuse', '=', True)])\n for unit in unit_ids:\n self.count_facility += 1\n\n @api.multi\n def action_facilities(self):\n facility_ids = self.env['pms.facilities'].search([\n ('utilities_no', '=', self.id), ('status', '=', True)\n ])\n\n action = self.env.ref(\n 'property_management_system.action_facilities_all').read()[0]\n if len(facility_ids) > 1:\n action['domain'] = [('id', 'in', facility_ids.ids)]\n elif len(facility_ids) == 1:\n action['views'] = [(self.env.ref(\n 'property_management_system.view_facilities_form').id, 'form')]\n action['res_id'] = facility_ids.ids[0]\n else:\n action = {'type': 'ir.actions.act_window_close'}\n return action\n\n @api.model\n def create(self, values):\n equip_id = self.search([('name', '=', values['name'])])\n if equip_id:\n raise UserError(_(\"%s is already existed\" % values['name']))\n return super(PMSEquipment, self).create(values)\n\n @api.multi\n def write(self, vals):\n if 'name' in vals:\n equip_id = self.search([('name', '=', vals['name'])])\n if equip_id:\n raise UserError(_(\"%s is already existed\" % vals['name']))\n return super(PMSEquipment, self).write(vals)\n\n def _compute_line_data_for_template_change(self, line):\n return {'utilities_supply_id': line.utilities_supply_id.id}\n\n @api.onchange('meter_template_id')\n def onchange_meter_template_id(self):\n template = self.meter_template_id.with_context(\n lang=self._context.get('lang'))\n equipment_lines = [(5, 0, 0)]\n data = []\n if template:\n if len(template.template_line) > 0:\n for l in template.template_line:\n data = self._compute_line_data_for_template_change(l)\n equipment_lines.append((0, 0, data))\n self.equipment_line = equipment_lines\n self.utilities_type = template.utilities_type\n self.current_unit_type = template.current_unit_type\n self.meter_type = template.meter_type\n self.power_system = template.power_system\n self.digit = template.digit\n\n @api.onchange('digit')\n def onchange_digit(self):\n if self.digit and self.name:\n facility_id = self.env['pms.facilities'].search([\n ('utilities_no', '=', self.name), ('inuse', '=', True)\n ])\n if facility_id.facilities_line:\n for fl in facility_id.facilities_line:\n spunitfl_ids = self.env[\n 'pms.space.unit.facility.lines'].search([\n ('facility_id', '=', facility_id.id),\n ('facility_line_id', '=', 
fl.id),\n ('inuse', '=', True)\n ])\n spunitfl_ids.write({'digit': self.digit})\n\n\nclass PMSEquipmentType(models.Model):\n _name = 'pms.equipment.type'\n _description = 'Equipment Types'\n _order = 'sequence,name'\n\n name = fields.Char(\"Equipment Type\", required=True, track_visibility=True)\n description = fields.Text(\"Description\", track_visibility=True)\n active = fields.Boolean(default=True, track_visibility=True)\n sequence = fields.Integer(track_visibility=True)\n index = fields.Integer(compute='_compute_index')\n\n @api.one\n def _compute_index(self):\n cr, uid, ctx = self.env.args\n self.index = self._model.search_count(\n cr, uid, [('sequence', '<', self.sequence)], context=ctx) + 1\n\n @api.model\n def create(self, values):\n equip_type_id = self.search([('name', '=', values['name'])])\n if equip_type_id:\n raise UserError(_(\"%s is already existed\" % values['name']))\n return super(PMSEquipmentType, self).create(values)\n\n @api.multi\n def write(self, vals):\n if 'name' in vals:\n equip_type_id = self.search([('name', '=', vals['name'])])\n if equip_type_id:\n raise UserError(_(\"%s is already existed\" % vals['name']))\n return super(PMSEquipmentType, self).write(vals)\n\n\nclass PMSEquipmentLine(models.Model):\n _name = 'pms.equipment.line'\n _description = 'Equipment Lines'\n\n utilities_supply_id = fields.Many2one(\"pms.utilities.supply\",\n \"Utilities Supply\")\n equipment_id = fields.Many2one(\"pms.equipment\", \"Equipment\")","repo_name":"dionisiotorres/propertymanagememt","sub_path":"property_management_system/models/pms_equipment.py","file_name":"pms_equipment.py","file_ext":"py","file_size_in_byte":7877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5485434746","text":"import sys\n\ninput = sys.stdin.readline\n\nN = int(input())\n\ntimes = [list(map(int, input().split())) for _ in range(N)]\n\ntimes.sort(key=lambda x: (x[1], x[0]))\nanswer = []\n\nfor t in times:\n if not answer:\n answer.append(t)\n continue\n\n last = answer[len(answer) - 1]\n if t[0] >= last[1]:\n answer.append(t)\n\nprint(len(answer))","repo_name":"dongqui/problemSolving","sub_path":"백준/Silver/1931. 
회의실 배정/회의실 배정.py","file_name":"회의실 배정.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33456070788","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom typing import Tuple\n\n\ndef parse_wld(wld: list, played_first: bool = True) -> Tuple[list, float]:\n \"\"\"\n Parse the wld list to get the win percentage for a given player\n\n :param wld: list of tuples (result, played_first)\n :param played_first: player to get win percentage for \n\n :return: list of results for given player, win percentage\n \"\"\"\n turn_wld = [x[0] for x in wld if x[1] is played_first]\n win_pct = 100 * \\\n np.sum([1 if x == 'w' else 0 for x in turn_wld]) / len(turn_wld)\n\n return turn_wld, win_pct\n\n\ndef plot_wld(wld: list) -> Tuple[plt.Figure, plt.Axes]:\n \"\"\"\n Plot the win/loss/draw results for both players\n\n \n :param wld: list of tuples (result, played_first)\n\n :return: figure, axes\n \"\"\"\n def subplot(wld_arr, ax):\n\n ws = np.cumsum([1 if x == 'w' else 0 for x in wld_arr])\n ds = np.cumsum([1 if x == 'l' else 0 for x in wld_arr])\n ls = np.cumsum([1 if x == 'd' else 0 for x in wld_arr])\n\n ax.plot(ws, label='w')\n ax.plot(ds, label='d')\n ax.plot(ls, label='l')\n\n return ax\n\n first_wld, first_win_pct = parse_wld(wld, played_first=True)\n second_wld, second_win_pct = parse_wld(wld, played_first=False)\n\n fig, axs = plt.subplots(1, 2, sharey=True)\n\n axs[0] = subplot(first_wld, axs[0])\n axs[1] = subplot(second_wld, axs[1])\n\n axs[0].set_title(f'Played First - {first_win_pct:.2f}%')\n axs[1].set_title(f'Played Second - {second_win_pct:.2f}%')\n\n axs[0].set_xlabel('Games')\n plt.legend()\n\n return fig, axs\n","repo_name":"georgec123/aiml","sub_path":"src/Q1/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36020907681","text":"def countzero():\n count = 0\n arr_len = len(a)\n for k in range(arr_len):\n if a[k] == \"0\":\n count = arr_len - k\n print(count)\n return\n elif k == arr_len-1:\n print(count)\nif __name__==\"__main__\":\n t = int(input(\"t \"))\n for i in range(t):\n a = []\n n = int(input(\"n \"))\n for j in range(n):\n a.append(input(\"input on 1 and 0 \"))\n countzero()","repo_name":"aarjukhicher/geeksprogram","sub_path":"Array/count_zeros.py","file_name":"count_zeros.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16836987168","text":"#-------------------------------------------------------------------------------\n# Author: Lukasz Janyst \n# Date: 14.06.2017\n#-------------------------------------------------------------------------------\n\nimport random\nimport cv2\n\nimport sys,os\nsys.path.append(os.getcwd())\nsys.path.append(\"..\")\n\nimport numpy as np\nimport tensorflow as tf\n\nimport glob\nfrom augmentation import rotate_one, rotate_both, flip_one, flip_both, blur_one, blur_both, illumination_change_both\n\n# BGR color\nTRAIN_DATA_MEAN = [72.5099, 73.4619, 70.0471]\nVGG_MEAN = [103.939, 116.779, 123.68]\nMEAN_40X_32S = [103.939, 116.779, 123.68]\nMEAN_40X_64S = [196.9689, 166.0824, 204.7978]\nMEAN_40X_32R = [103.939, 116.779, 123.68]\nMEAN_40X_64R = [103.939, 116.779, 123.68]\n\nclass SourceClassification:\n #---------------------------------------------------------------------------\n def 
__init__(self):\n # self.original_size = (700, 460)\n # self.scaled_size = (350, 230)\n self.image_size = (64, 64)\n\n self.num_classes = 2\n self.class_name = {0: 'B', # Benign\n 1: 'M'} # Malignant\n\n self.num_training = None\n self.num_validation = None\n self.train_generator = None\n self.valid_generator = None\n\n #---------------------------------------------------------------------------\n def load_data(self, data_dir, valid_fraction):\n \"\"\"\n Load the data and make the generators\n :param data_dir: the directory where the dataset's file are stored\n :param valid_fraction: what franction of the dataset should be used\n as a validation sample\n \"\"\"\n\n train_paths = glob.glob('{:s}/*.png'.format(data_dir), recursive=True)\n\n num_images = len(train_paths)\n if num_images == 0:\n raise RuntimeError('No data files found in ' + data_dir)\n\n\n # for path in train_paths:\n # print(path.strip('/').split('\\\\')[-2])\n # input(\"Press Enter to continue...\")\n\n label_list = {\n os.path.basename(path): path.strip('/').split('_')[-2]\n for path in train_paths}\n self.train_label_list = label_list\n\n # print(self.train_label_list[os.path.basename(train_paths[0])])\n\n self.num_training = len(train_paths)\n self.train_generator = self.batch_generator(train_paths, 0)\n\n #---------------------------------------------------------------------------\n def batch_generator(self, image_paths, batch_type):\n def gen_batch(batch_size, names=False):\n random.shuffle(image_paths)\n for offset in range(0, len(image_paths), batch_size):\n files = image_paths[offset:offset+batch_size]\n\n images = []\n labels = []\n names_images = []\n\n counting = 0;\n while (len(files) < batch_size):\n files.append(image_paths[counting])\n counting += 1\n\n for image_file in files:\n if (batch_type == 0):\n label_type = self.train_label_list[os.path.basename(image_file)]\n else:\n label_type = self.val_label_list[os.path.basename(image_file)]\n\n image = cv2.imread(image_file)*1.0 - MEAN_40X_64S\n label = make_label_tensor(label_type)\n\n # image = flip_one(image, p=0.5)\n # image = rotate_one(image, p=0.5, ignore_label=1)\n #image, label = blur_both(image, label, p=0.5)\n\n images.append(image.astype(np.float32))\n labels.append(label)\n\n if names:\n names_images.append(image_file)\n\n if names:\n yield np.array(images), np.array(labels), \\\n names_images\n else:\n yield np.array(images), np.array(labels)\n\n return gen_batch\n\n#-------------------------------------------------------------------------------\ndef make_label_tensor(label_type):\n if (label_type == 'B'):\n return np.array([1., 0.])\n else:\n return np.array([0., 1.])\n\n#-------------------------------------------------------------------------------\ndef get_source_classification():\n return SourceClassification()\n","repo_name":"ivo-gilles/breast-cancer-classification","sub_path":"get_source.py","file_name":"get_source.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2217084559","text":"import os\nimport tensorflow as tf\nimport numpy as np\nimport math\n\ndef deprocess(image):\n with tf.name_scope(\"deprocess\"):\n # [-1, 1] => [0, 1]\n return (image + 1) / 2\n\n# Normalizes a tensor troughout the Channels dimension (BatchSize, Width, Height, Channels)\n# Keeps 4th dimension to 1. 
Output will be (BatchSize, Width, Height, 1).\ndef tf_Normalize(tensor):\n Length = tf.sqrt(tf.reduce_sum(tf.square(tensor), axis = -1, keep_dims=True))\n return tf.div(tensor, Length)\n\n# Computes the dot product between 2 tensors (BatchSize, Width, Height, Channels)\n# Keeps 4th dimension to 1. Output will be (BatchSize, Width, Height, 1).\ndef tf_DotProduct(tensorA, tensorB):\n return tf.reduce_sum(tf.multiply(tensorA, tensorB), axis = -1, keep_dims=True)\n\n#Physically based lamp attenuation\ndef tf_lampAttenuation_pbr(distance):\n return 1.0 / tf.square(distance)\n\n#Clip values between min an max\ndef squeezeValues(tensor, min, max):\n return tf.clip_by_value(tensor, min, max)\n\n# Generate an array grid between -1;1 to act as the \"coordconv\" input layer (see coordconv paper)\ndef generateCoords(inputShape):\n crop_size = inputShape[-2]\n firstDim = inputShape[0]\n\n Xcoords= tf.expand_dims(tf.lin_space(-1.0, 1.0, crop_size), axis=0)\n Xcoords = tf.tile(Xcoords,[crop_size, 1])\n Ycoords = -1 * tf.transpose(Xcoords) #put -1 in the bottom of the table\n Xcoords = tf.expand_dims(Xcoords, axis = -1)\n Ycoords = tf.expand_dims(Ycoords, axis = -1)\n coords = tf.concat([Xcoords, Ycoords], axis=-1)\n coords = tf.expand_dims(coords, axis = 0)#Add dimension to support batch size and nbRenderings should now be [1, 256, 256, 2].\n coords = tf.tile(coords, [firstDim, 1, 1, 1]) #Add the proper dimension here for concat\n return coords\n\n# Generate an array grid between -1;1 to act as each pixel position for the rendering.\ndef generateSurfaceArray(crop_size, pixelsToAdd = 0):\n totalSize = crop_size + (pixelsToAdd * 2)\n surfaceArray=[]\n XsurfaceArray = tf.expand_dims(tf.lin_space(-1.0, 1.0, totalSize), axis=0)\n XsurfaceArray = tf.tile(XsurfaceArray,[totalSize, 1])\n YsurfaceArray = tf.transpose(XsurfaceArray) #put -1 in the bottom of the table\n XsurfaceArray = tf.expand_dims(XsurfaceArray, axis = -1)\n YsurfaceArray = tf.expand_dims(YsurfaceArray, axis = -1)\n\n surfaceArray = tf.concat([XsurfaceArray, YsurfaceArray, tf.zeros([totalSize, totalSize,1], dtype=tf.float32)], axis=-1)\n surfaceArray = tf.expand_dims(tf.expand_dims(surfaceArray, axis = 0), axis = 0)#Add dimension to support batch size and nbRenderings\n return surfaceArray\n\n#Adds a little bit of noise\ndef addNoise(renderings):\n shape = tf.shape(renderings)\n stddevNoise = tf.exp(tf.random_normal((), mean = np.log(0.005), stddev=0.3))\n noise = tf.random_normal(shape, mean=0.0, stddev=stddevNoise)\n return renderings + noise\n\n#Generate a random direction on the upper hemisphere with gaps on the top and bottom of Hemisphere. 
Equation is described in the Global Illumination Compendium (19a)\ndef tf_generate_normalized_random_direction(batchSize, nbRenderings, lowEps = 0.001, highEps = 0.05):\n r1 = tf.random_uniform([batchSize, nbRenderings, 1], 0.0 + lowEps, 1.0 - highEps, dtype=tf.float32)\n r2 = tf.random_uniform([batchSize, nbRenderings, 1], 0.0, 1.0, dtype=tf.float32)\n r = tf.sqrt(r1)\n phi = 2 * math.pi * r2\n #min alpha = atan(sqrt(1-r^2)/r)\n x = r * tf.cos(phi)\n y = r * tf.sin(phi)\n z = tf.sqrt(1.0 - tf.square(r))\n finalVec = tf.concat([x, y, z], axis=-1) #Dimension here should be [batchSize,nbRenderings, 3]\n return finalVec\n \n#Generate a distance to compute for the specular renderings (as position is important for this kind of renderings)\ndef tf_generate_distance(batchSize, nbRenderings):\n gaussian = tf.random_normal([batchSize, nbRenderings, 1], 0.5, 0.75, dtype=tf.float32) # parameters chosen empirically to have a nice distance from a -1;1 surface.\n return (tf.exp(gaussian))\n \n#generate the diffuse rendering for the loss computation\ndef tf_generateDiffuseRendering(batchSize, nbRenderings, targets, outputs, renderer):\n currentViewPos = tf_generate_normalized_random_direction(batchSize, nbRenderings, lowEps = 0.001, highEps = 0.1)\n currentLightPos = tf_generate_normalized_random_direction(batchSize, nbRenderings, lowEps = 0.001, highEps = 0.1)\n\n wi = currentLightPos\n wi = tf.expand_dims(wi, axis=2)\n wi = tf.expand_dims(wi, axis=2)\n\n wo = currentViewPos\n wo = tf.expand_dims(wo, axis=2)\n wo = tf.expand_dims(wo, axis=2)\n\n #Add a dimension to compensate for the nb of renderings\n #targets = tf.expand_dims(targets, axis=-2)\n #outputs = tf.expand_dims(outputs, axis=-2)\n\n #Here we have wi and wo with shape [batchSize, height,width, nbRenderings, 3]\n renderedDiffuse = renderer.tf_Render(targets,wi,wo, None, \"diffuse\", useAugmentation = False, lossRendering = True)[0]\n\n renderedDiffuseOutputs = renderer.tf_Render(outputs,wi,wo, None, \"\", useAugmentation = False, lossRendering = True)[0]#tf_Render_Optis(outputs,wi,wo)\n #renderedDiffuse = tf.Print(renderedDiffuse, [tf.shape(renderedDiffuse)], message=\"This is renderings targets Diffuse: \", summarize=20)\n #renderedDiffuseOutputs = tf.Print(renderedDiffuseOutputs, [tf.shape(renderedDiffuseOutputs)], message=\"This is renderings outputs Diffuse: \", summarize=20)\n return [renderedDiffuse, renderedDiffuseOutputs]\n\n#generate the specular rendering for the loss computation\ndef tf_generateSpecularRendering(batchSize, nbRenderings, surfaceArray, targets, outputs, renderer):\n currentViewDir = tf_generate_normalized_random_direction(batchSize, nbRenderings, lowEps = 0.001, highEps = 0.1)\n currentLightDir = currentViewDir * tf.expand_dims([-1.0, -1.0, 1.0], axis = 0)\n #Shift position to have highlight elsewhere than in the center.\n currentShift = tf.concat([tf.random_uniform([batchSize, nbRenderings, 2], -1.0, 1.0), tf.zeros([batchSize, nbRenderings, 1], dtype=tf.float32) + 0.0001], axis=-1)\n\n currentViewPos = tf.multiply(currentViewDir, tf_generate_distance(batchSize, nbRenderings)) + currentShift\n currentLightPos = tf.multiply(currentLightDir, tf_generate_distance(batchSize, nbRenderings)) + currentShift\n\n currentViewPos = tf.expand_dims(currentViewPos, axis=2)\n currentViewPos = tf.expand_dims(currentViewPos, axis=2)\n\n currentLightPos = tf.expand_dims(currentLightPos, axis=2)\n currentLightPos = tf.expand_dims(currentLightPos, axis=2)\n\n wo = currentViewPos - surfaceArray\n wi = currentLightPos - surfaceArray\n\n 
#targets = tf.expand_dims(targets, axis=-2)\n #outputs = tf.expand_dims(outputs, axis=-2)\n #targets = tf.Print(targets, [tf.shape(targets)], message=\"This is targets in specu renderings: \", summarize=20)\n renderedSpecular = renderer.tf_Render(targets,wi,wo, None, \"specu\", useAugmentation = False, lossRendering = True)[0]\n renderedSpecularOutputs = renderer.tf_Render(outputs,wi,wo, None, \"\", useAugmentation = False, lossRendering = True)[0]\n #tf_Render_Optis(outputs,wi,wo, includeDiffuse = a.includeDiffuse)\n\n #renderedSpecularOutputs = tf.Print(renderedSpecularOutputs, [tf.shape(renderedSpecularOutputs)], message=\"This is renderings outputs Specular: \", summarize=20)\n return [renderedSpecular, renderedSpecularOutputs]\n\ndef tf_generateTopRendering(batchSize, nbRenderings, surfaceArray, targets, outputs, renderer):\n fov = tf.random_uniform([batchSize, nbRenderings, 1], 25.0, 35.0)\n dist = 1 / tf.tan(fov / 2 / 180 * np.pi)\n xy = tf.random_uniform([batchSize, nbRenderings, 2], -1.0, 1.0)\n pos = tf.concat([xy, dist], axis=-1)\n\n pos = tf.expand_dims(pos, axis=2)\n pos = tf.expand_dims(pos, axis=2)\n\n wi = pos - surfaceArray\n\n #targets = tf.expand_dims(targets, axis=-2)\n #outputs = tf.expand_dims(outputs, axis=-2)\n #targets = tf.Print(targets, [tf.shape(targets)], message=\"This is targets in specu renderings: \", summarize=20)\n renderedTop = renderer.tf_Render(targets,wi,wi, None, \"specu\", useAugmentation = False, lossRendering = True)[0]\n renderedTopOutputs = renderer.tf_Render(outputs,wi,wi, None, \"\", useAugmentation = False, lossRendering = True)[0]\n #tf_Render_Optis(outputs,wi,wo, includeDiffuse = a.includeDiffuse)\n\n #renderedSpecularOutputs = tf.Print(renderedSpecularOutputs, [tf.shape(renderedSpecularOutputs)], message=\"This is renderings outputs Specular: \", summarize=20)\n return [renderedTop, renderedTopOutputs]\n\n#Put the normals and roughness back to 3 channel for easier processing.\ndef deprocess_outputs(outputs):\n partialOutputedNormals = outputs[:,:,:,0:2] * 3.0 #The multiplication here gives space to generate direction with angle > pi/4\n outputedDiffuse = outputs[:,:,:,2:5]\n outputedRoughness = outputs[:,:,:,5]\n outputedSpecular = outputs[:,:,:,6:9]\n normalShape = tf.shape(partialOutputedNormals)\n newShape = [normalShape[0], normalShape[1], normalShape[2], 1]\n #normalShape[-1] = 1\n tmpNormals = tf.ones(newShape, tf.float32)\n\n normNormals = tf_Normalize(tf.concat([partialOutputedNormals, tmpNormals], axis = -1))\n outputedRoughnessExpanded = tf.expand_dims(outputedRoughness, axis = -1)\n return tf.concat([normNormals, outputedDiffuse, outputedRoughnessExpanded, outputedRoughnessExpanded, outputedRoughnessExpanded, outputedSpecular], axis=-1)\n","repo_name":"ywjleft/SVBRDF_from_Video","sub_path":"utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":9383,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"25631866852","text":"import os\nimport sys\n\nsys.path.append(os.getcwd())\n\nimport gc\nimport time\nimport torch\nimport warnings\nimport numpy as np\n\nfrom options import TrainArgs\nfrom tabulate import tabulate\nfrom torch.utils.data import random_split\n\n# importing useful function/methods\nfrom functional.saving import Checkpointer\nfrom functional.metric import AverageMeter\nfrom functional.time import get_process_time\nfrom functional.data.dataloader import Dataloader\nfrom functional.visualizer import VisdomVisualizer\nfrom 
functional.event_tracker import logger as Logger\n\nfrom keyword_information_extraction.configs import configs\nfrom keyword_information_extraction.data.dataset import SROIE2019Dataset, TrainBatchCollator\nfrom keyword_information_extraction.model.charlm import CharacterLevelCNNHighwayBiLSTM as CharLM\nfrom keyword_information_extraction.utils.misc import multilabel_confusion_matrix, check_denominator_consistency\n\n# Put warnings to silence if any.\nwarnings.filterwarnings(\"ignore\")\n\ntrain_args = TrainArgs(description=\"Keyword Information Extraction: training\")\nparser = train_args.get_parser()\nargs, _ = parser.parse_known_args()\n\n\ndef main():\n # One can comment the line below.\n # It is important to note it is useful when some nasty errors like the NaN loss show up.\n torch.autograd.set_detect_anomaly(True)\n\n # A boolean to check whether the user is able to use cuda or not.\n use_cuda = torch.cuda.is_available() and args.use_cuda\n\n output_dir = os.path.normpath(configs.OUTPUT_DIR)\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n # The declaration and initialization of the logger.\n logger = Logger(name=\"CharLM: training phase\", output_dir=output_dir, log_filename=\"training-log\")\n\n # The declaration and initialization of the checkpointer.\n checkpointer = Checkpointer(logger=logger, output_dir=output_dir)\n\n # The declaration of the training dataloader arguments.\n training_dataloader_args = dict(configs.DATALOADER.TRAINING)\n\n # Adding the collate_fn.\n training_dataloader_args[\"collate_fn\"] = TrainBatchCollator(class_labels_padding_value=configs.LOSS.IGNORE_INDEX)\n\n # The declaration and tensor type of the CPU/GPU device.\n if not use_cuda:\n device = torch.device(\"cpu\")\n torch.set_default_tensor_type(\"torch.FloatTensor\")\n else:\n device = torch.device(\"cuda\")\n torch.cuda.set_device(args.gpu_device)\n torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")\n\n # Enabling cuDNN.\n torch.backends.cudnn.enabled = True\n\n training_dataloader_args[\"generator\"] = torch.Generator(device=device)\n\n labels_classes = dict(configs.DATASET.LABELS_CLASSES)\n\n labels_classes = dict(sorted(labels_classes.items(), key=lambda item: item[1]))\n\n train_dataset = SROIE2019Dataset(directory_dict=dict(configs.DATASET.TRAIN), labels_classes=labels_classes)\n\n vocabulary = train_dataset.vocabulary\n\n text_max_length = train_dataset.text_max_length\n\n class_weights = torch.as_tensor(train_dataset.class_weights.tolist(), device=device, dtype=torch.float32)\n\n train_criterion = torch.nn.CrossEntropyLoss(weight=class_weights,\n ignore_index=configs.LOSS.IGNORE_INDEX,\n reduction=\"mean\").to(device, non_blocking=True)\n\n training_dataloader_args[\"pin_memory\"] = training_dataloader_args[\"pin_memory\"] and use_cuda\n train_loader = Dataloader(dataset=train_dataset, is_train=True, **training_dataloader_args)\n\n logger.info(\"Declaration and initialization of the model and optimizer...\")\n model_args = dict(configs.MODEL.ARGS)\n model = CharLM(n_classes=configs.DATASET.NUM_CLASSES,\n max_seq_length=text_max_length,\n char_vocab_size=len(vocabulary),\n **model_args).to(device, non_blocking=True)\n\n logger.info(\"Declaration and initialization of the optimizer...\")\n optimizer_args = dict(configs.SOLVER.ADAM.ARGS)\n optimizer = torch.optim.Adam(params=model.parameters(), **optimizer_args)\n\n logger.info(\"Declaration and initialization of the learning rate scheduler...\")\n lr_scheduler_args = dict(configs.SOLVER.SCHEDULER.ARGS)\n lr_scheduler = 
torch.optim.lr_scheduler.StepLR(optimizer=optimizer, **lr_scheduler_args)\n\n    start_epoch = 1\n    max_epochs = configs.SOLVER.MAX_EPOCHS\n    checkpoint_data = {}\n    last_elapsed_time = 0.0\n    plotter = total_loss_plotter_window = None\n\n    if args.resume is not None:\n\n        logger.info(\"Resuming the training...\")\n\n        checkpoint_data = checkpointer.load(file_to_load=args.resume, map_location=device)\n\n        start_epoch = checkpoint_data.get(\"epoch\", 1)\n\n        last_elapsed_time = checkpoint_data.get(\"elapsed_time\", 0.0)\n\n        total_loss_plotter_window = checkpoint_data.get(\"total_loss_plot_win\")\n\n        optimizer_state_dict = checkpoint_data.get(\"optimizer_state_dict\")\n        if optimizer_state_dict is not None:\n            optimizer.load_state_dict(optimizer_state_dict)\n\n        # Loading the learning rate scheduler state dict...\n        lr_scheduler_state_dict = checkpoint_data.get(\"lr_scheduler_state_dict\")\n        if lr_scheduler_state_dict is not None:\n            lr_scheduler.load_state_dict(lr_scheduler_state_dict)\n\n        model_state_dict = checkpoint_data.get(\"model_state_dict\")\n        if model_state_dict is not None:\n            model.load_state_dict(model_state_dict)\n\n        # Loading the loss/criterion...\n        train_loss = checkpoint_data.get(\"train_criterion\")\n        if train_loss is not None:\n            train_criterion = train_loss\n\n    if args.use_visdom:\n        logger.info(\"Initialising the visualiser 'Visdom'\")\n        plotter = VisdomVisualizer(port=configs.VISDOM.PORT, env_name=configs.VISDOM.ENV_NAME)\n\n    if plotter is not None and total_loss_plotter_window is None:\n        logger.info(\"Plot creations...\")\n        title_name = \"Character Level CNN + Highway + BiLSTM: training\"\n        legend_names = [\"Training loss\"]\n        total_loss_plotter_window = plotter.createPlot(xLabel=\"Epochs\", yLabel=\"Loss\",\n                                                       legend_names=legend_names, title_name=title_name)\n\n    logger.info(\"About to start the training...\")\n\n    # Force the garbage collector to run.\n    gc.collect()\n\n    if use_cuda:\n        # Before starting the training, all the unoccupied cached memory is released.\n        torch.cuda.empty_cache()\n\n        # waits for all tasks in the GPU to complete.\n        torch.cuda.current_stream(device).synchronize()\n\n    # Starting time.\n    start_time = time.time()\n\n    for current_epoch in range(start_epoch, max_epochs + 1):\n\n        training_loss = train(model=model, optimizer=optimizer, train_loader=train_loader,\n                              criterion=train_criterion, device=device, logger=logger,\n                              current_epoch=current_epoch, max_epochs=max_epochs,\n                              entities_names=list(labels_classes.keys()))\n\n        # Update the learning rate.\n        lr_scheduler.step(current_epoch)\n\n        if use_cuda:\n            # waits for all tasks in the GPU to complete\n            torch.cuda.current_stream(device).synchronize()\n\n        elapsed_time = last_elapsed_time + (time.time() - start_time)\n\n        remaining_time, estimated_finish_time = get_process_time(start_time=start_time,\n                                                                 elapsed_time=elapsed_time,\n                                                                 current_epoch=current_epoch,\n                                                                 max_epochs=max_epochs)\n\n        logger.info(\"Elapsed time: {et} seconds || \"\n                    \"Remaining time: {lt} seconds || \"\n                    \"ETA: {eta}\\n\".format(\n            et=elapsed_time,\n            lt=remaining_time,\n            eta=estimated_finish_time\n        ))\n\n        # Updating important data.\n        checkpoint_data.update({\n            \"epoch\": current_epoch + 1,\n            \"elapsed_time\": elapsed_time,\n            \"model_state_dict\": model.state_dict(),\n            \"optimizer_state_dict\": optimizer.state_dict(),\n            \"lr_scheduler_state_dict\": lr_scheduler.state_dict()\n        })\n\n        # Updating the plot if it was previously set (i.e., the plotter is not None)\n        if plotter is not None and \\\n                current_epoch > 0 and \\\n                args.plot_steps is not None and \\\n                
args.plot_steps > 0 and \\\n current_epoch % args.plot_steps == 0:\n total_loss_data_y = [training_loss]\n plotter.update_plot(window=total_loss_plotter_window, data_x=current_epoch, data_y=total_loss_data_y)\n checkpoint_data.update({\"total_loss_plot_win\": total_loss_plotter_window})\n\n if current_epoch > 0 and \\\n args.save_steps is not None and \\\n args.save_steps > 0 and \\\n current_epoch % args.save_steps == 0 and \\\n current_epoch != max_epochs:\n\n logger.info(\"Saving a checkpoint at epoch {0}...\\n\".format(current_epoch))\n\n checkpointer.save(name=\"CHARLM_CHECKPOINT_EPOCH_{0}\".format(current_epoch), data=checkpoint_data)\n\n # Saving plots...\n if plotter is not None:\n plotter.save()\n\n logger.info(\"Training has just finished. Saving the final checkpoint...\\n\")\n checkpointer.save(name=\"CHARLM_FINAL_CHECKPOINT\", data=checkpoint_data)\n\n if use_cuda:\n # Just in case, we release all the unoccupied cached memory after training.\n torch.cuda.empty_cache()\n\n\ndef train(model: torch.nn.Module,\n optimizer: torch.optim.Optimizer,\n train_loader: torch.utils.data.DataLoader,\n criterion: torch.nn.Module,\n device: torch.device,\n current_epoch: int,\n max_epochs: int,\n logger: Logger,\n entities_names: list):\n true_classes = []\n\n predicted_classes = []\n\n training_losses = AverageMeter(fmt=\":.6f\")\n\n model = model.train()\n\n for current_iteration, batch_samples in enumerate(train_loader):\n\n text_features, text_class_labels = batch_samples\n\n text_features = text_features.to(device, non_blocking=True)\n\n text_class_labels = text_class_labels.to(device, non_blocking=True)\n\n # Clearing out the model's gradients before doing backprop.\n # 'set_to_none=True' here can modestly improve performance\n optimizer.zero_grad(set_to_none=True)\n\n outputs = model(text_features)\n\n inputs = outputs.contiguous().view(-1, configs.DATASET.NUM_CLASSES)\n\n targets = text_class_labels.contiguous().view(-1)\n\n loss = criterion(inputs, targets)\n\n loss.backward()\n\n optimizer.step()\n\n # Calling \".item()\" operation requires synchronization. 
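        # NOTE (hypothetical sketch, added for illustration): `AverageMeter` is
        # imported from functional.metric and not shown in this file; the update
        # below relies on it behaving as a batch-size-weighted running mean kept
        # on-device, roughly:
        #
        #   class AverageMeter:
        #       def update(self, value, n=1):
        #           self.sum += value * n  # per-batch loss, weighted by batch size
        #           self.count += n
        #       @property
        #       def global_avg(self):
        #           return self.sum / self.count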
# For further info, check this out:\n        # https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html And this as well:\n        # https://stackoverflow.com/questions/56816241/difference-between-detach-and-with-torch-nograd-in-pytorch\n        # Just in case of in-place operations, one can add .clone() to avoid nasty modifications...\n        training_losses.update(loss.detach().clone(), n=text_features.size(0))\n\n        with torch.no_grad():\n\n            _, predictions = torch.max(torch.softmax(outputs, dim=2), dim=2)\n\n            predicted_classes.extend(predictions.contiguous().view(-1).tolist())\n\n            true_classes.extend(text_class_labels.contiguous().view(-1).tolist())\n\n        if current_iteration > 0 and args.log_steps is not None and \\\n                args.log_steps > 0 and current_iteration % args.log_steps == 0:\n            logger.info(\"Epoch: {curr_epoch}/{maximum_epochs} || \"\n                        \"Iteration: {iter} || \"\n                        \"Learning rate: {lr} || \"\n                        \"Loss: {loss}\\n\".format(\n                curr_epoch=current_epoch,\n                maximum_epochs=max_epochs,\n                iter=current_iteration,\n                lr=optimizer.param_groups[0][\"lr\"],\n                loss=training_losses\n            ))\n\n    n_classes = configs.DATASET.NUM_CLASSES\n    MCM = multilabel_confusion_matrix(inputs=predicted_classes, targets=true_classes, n_classes=n_classes)\n\n    TPc = MCM[:, 1, 1]\n\n    FPc = MCM[:, 0, 1]\n\n    FNc = MCM[:, 1, 0]\n\n    precision_denominator = check_denominator_consistency(TPc + FPc)\n    precision = np.expand_dims(TPc / precision_denominator, axis=0).T\n\n    recall_denominator = check_denominator_consistency(TPc + FNc)\n    recall = np.expand_dims(TPc / recall_denominator, axis=0).T\n\n    f1_denominator = check_denominator_consistency(precision + recall)\n    f1_Score = 2.0 * ((precision * recall) / f1_denominator)\n\n    data_list = np.concatenate([recall, precision, f1_Score], axis=1)\n\n    grid_data_list = [] + configs.TABULATE.DATA_LIST\n    for i, data in enumerate(data_list):\n        rec = data[0]  # column 0 of data_list holds recall\n        prec = data[1]  # column 1 holds precision\n        hmean = data[2]\n        grid_data_list.append([entities_names[i], rec, prec, hmean])\n\n    training_loss = training_losses.global_avg\n\n    logger.info(\"Training loss = {0}\\n\".format(training_loss))\n\n    logger.info(\"Results:\\n\" + tabulate(grid_data_list, headers=\"firstrow\", tablefmt=\"grid\"))\n\n    return training_loss\n\n\nif __name__ == '__main__':\n\n    # Guarding against bad arguments.\n\n    if args.save_steps is not None and args.save_steps <= 0:\n        raise ValueError(\"{0} is an invalid value for the argument: --save-steps\".format(args.save_steps))\n    elif args.log_steps is not None and args.log_steps <= 0:\n        raise ValueError(\"{0} is an invalid value for the argument: --log-steps\".format(args.log_steps))\n    elif args.plot_steps is not None and args.plot_steps <= 0:\n        raise ValueError(\"{0} is an invalid value for the argument: --plot-steps\".format(args.plot_steps))\n    elif args.resume is not None and not os.path.isfile(args.resume):\n        raise ValueError(\"The path to the checkpoint data file is wrong!\")\n\n    gpu_devices = list(range(torch.cuda.device_count()))\n    if len(gpu_devices) != 0 and args.gpu_device not in gpu_devices:\n        raise ValueError(\"Your GPU ID is out of the range! 
\"\n \"You may want to check it with 'nvidia-smi' or \"\n \"'Task Manager' for Windows users.\")\n\n if args.use_cuda and not torch.cuda.is_available():\n raise ValueError(\"The argument --use-cuda is specified but it seems you cannot use CUDA!\")\n\n if args.config_file is not None:\n if not os.path.isfile(args.config_file):\n raise ValueError(\"The configs file is wrong!\")\n else:\n configs.merge_from_file(args.config_file)\n configs.freeze()\n\n main()\n","repo_name":"BlackStar1313/ICDAR-2019-RRC-SROIE","sub_path":"keyword_information_extraction/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14806,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"16"} +{"seq_id":"12588966073","text":"# To-Do:\n# Open connection, wrap new subscriptions in threading or multi-processing\n# Creat api endpoint to allow other programs to start thread and use data // or write to db and access there\n# Enable passing arguments to ws.send to open sockets for new data\n# Add ability to make trade calls\n# Keep it simple\n# Document\n# Save to sqlite db here, with a seperate table for each sub\n\n\nimport json\nimport time\nfrom websocket import create_connection, WebSocketConnectionClosedException\n\nENDPOINT = \"wss://stream.binance.com:9443/ws\"\n\nws = create_connection(ENDPOINT)\n\nETH_KLINE = json.dumps(\n {\n \"method\": \"SUBSCRIBE\",\n \"params\": [\n \"ethusdt@kline_1m\",\n \"ethusdt@kline_5m\",\n \"ethusdt@kline_1h\",\n \"ethusdt@kline_8h\",\n \"ethusdt@kline_12h\"\n ],\n \"id\": 1\n }\n)\n\nws.send(ETH_KLINE)\n\nwhile True:\n result = ws.recv()\n \n msg = json.loads(result)\n\n print(\"message:\", msg)","repo_name":"lckynmbrsvn/crypto-cat","sub_path":"services/binance/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37042351792","text":"'''\nserverとは別のファイルで実行\nserverとは別のターミナルで\"python client.py\"を起動, \nserverとうまく通信が行えた場合, {ファイル名}_copy.txtというファイルが生成される\n'''\nimport socket\n\n# socketの作成\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)# AF_INET:IPv4, SOCK_STREAM:TCP通信\ns.connect((socket.gethostname(), 12345)) # serverと同じIPアドレスとportに接続\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # {adress} has been established 回避\nflag = False # 初期フラグの設定\nwhile True:\n keyinput = input(\"ファイル名を入力してください >> \") # キーボードからの入力受け取り\n s.send(keyinput.encode()) # データをserverに送信\n msg = s.recv(1024).decode(\"utf-8\") # serverからのデータの返信を受け取る\n if len(msg) <= 0: \n break\n\n if not msg == \"null\": # ファイルが存在していた場合\n print(\"ファイルの中身は\", msg) # 結果を表示\n print(\"ファイル名:\", keyinput[:-4] + \"_copy.txt に保存します.\")\n with open(keyinput[:-4] + \"_copy.txt\", \"w\") as f:\n f.write(msg)\n break\n else: # 存在しなかった場合は, もう一度ファイル名を入力するように要求\n print(\"ファイル名が間違っている可能性があります. 
もう一度入力してください\")\n\n ","repo_name":"Chotaro-0322/Socket_transport","sub_path":"socket_readFile/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40538450739","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd.function import Function\nimport torch.optim as optim\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\nfrom src.classes.utils import save, load\nfrom torch.utils.data import DataLoader, Dataset\n\n\nclass CenterLoss(nn.Module):\n def __init__(self, num_classes, feat_dim, size_average=True):\n super(CenterLoss, self).__init__()\n self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))\n self.centerlossfunc = CenterlossFunc.apply\n self.feat_dim = feat_dim\n self.size_average = size_average\n\n def forward(self, label, feat):\n batch_size = feat.size(0)\n feat = feat.view(batch_size, -1)\n # To check the dim of centers and features\n if feat.size(1) != self.feat_dim:\n raise ValueError(\"Center's dim: {0} should be equal to input feature's \\\n dim: {1}\".format(self.feat_dim, feat.size(1)))\n batch_size_tensor = feat.new_empty(1).fill_(\n batch_size if self.size_average else 1)\n loss = self.centerlossfunc(\n feat, label, self.centers, batch_size_tensor)\n return loss\n\n\nclass CenterlossFunc(Function):\n @staticmethod\n def forward(ctx, feature, label, centers, batch_size):\n ctx.save_for_backward(feature, label, centers, batch_size)\n centers_batch = centers.index_select(0, label.long())\n return (feature - centers_batch).pow(2).sum() / 2.0 / batch_size\n\n @staticmethod\n def backward(ctx, grad_output):\n feature, label, centers, batch_size = ctx.saved_tensors\n centers_batch = centers.index_select(0, label.long())\n diff = centers_batch - feature\n # init every iteration\n counts = centers.new_ones(centers.size(0))\n ones = centers.new_ones(label.size(0))\n grad_centers = centers.new_zeros(centers.size())\n\n counts = counts.scatter_add_(0, label.long(), ones)\n grad_centers.scatter_add_(\n 0, label.unsqueeze(1).expand(\n feature.size()).long(), diff)\n grad_centers = grad_centers / counts.view(-1, 1)\n return - grad_output * diff / batch_size, None, grad_centers / batch_size, None\n\n\nclass CenterLossNN(nn.Module):\n def __init__(self, x_shape, n_classes, latent_dim):\n super(CenterLossNN, self).__init__()\n self.conv1_1 = nn.Conv2d(1, 32, kernel_size=2)\n torch.nn.init.kaiming_normal_(self.conv1_1.weight)\n self.bn_1 = nn.BatchNorm2d(32)\n self.prelu1_1 = nn.PReLU()\n self.conv1_2 = nn.Conv2d(32, 64, kernel_size=2)\n torch.nn.init.kaiming_normal_(self.conv1_2.weight)\n self.bn_2 = nn.BatchNorm2d(64)\n self.do_1 = nn.Dropout2d(p=0.2)\n self.prelu1_2 = nn.PReLU()\n self.conv2_1 = nn.Conv2d(64, 128, kernel_size=2)\n torch.nn.init.kaiming_normal_(self.conv2_1.weight)\n self.bn_3 = nn.BatchNorm2d(128)\n self.prelu3_2 = nn.PReLU()\n \n self.preluip1 = nn.PReLU()\n self.ip1 = nn.Linear(128 * (x_shape[2] - 3), latent_dim)\n self.ip2 = nn.Linear(latent_dim, n_classes, bias=True)\n\n def forward(self, x):\n x = self.prelu1_1(self.bn_1(self.conv1_1(x)))\n x = self.prelu1_2(self.do_1(self.bn_2(self.conv1_2(x))))\n x = self.prelu3_2(self.bn_3(self.conv2_1(x)))\n \n x = torch.flatten(x, start_dim=1, end_dim=-1)\n \n ip1 = self.preluip1(self.ip1(x))\n \n ip2 = 
self.ip2(ip1)\n ip2 = F.log_softmax(ip2, dim=1)\n \n return ip1, ip2\n\nclass MLP_Network(nn.Module):\n def __init__(self, input_dim, output_dim, hidden_layer_dim, hidden_layers_num=2, dropout_rate=0.2):\n super( MLP_Network, self).__init__()\n \n \n def init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='relu')\n \n self.input_layer = nn.Sequential(\n nn.Linear(input_dim, hidden_layer_dim),\n nn.BatchNorm1d(hidden_layer_dim),\n nn.PReLU()\n )\n self.input_layer.apply(init_weights)\n \n\n self.middle_layers = []\n for i in range( hidden_layers_num ):\n hidden_layer = nn.Sequential(\n nn.Linear(hidden_layer_dim, hidden_layer_dim),\n nn.BatchNorm1d(hidden_layer_dim),\n nn.Dropout(p=dropout_rate),\n nn.PReLU(),\n )\n hidden_layer.apply(init_weights)\n self.middle_layers.append( hidden_layer )\n self.middle_layers = nn.Sequential( * self.middle_layers )\n \n self.latent_output = nn.Sequential(\n nn.Linear(hidden_layer_dim, hidden_layer_dim),\n #nn.BatchNorm1d(hidden_layer_dim),\n nn.PReLU(),\n )\n self.latent_output.apply(init_weights)\n \n self.classes_output = nn.Linear(hidden_layer_dim, output_dim, bias=True)\n nn.init.kaiming_uniform_(self.classes_output.weight, mode='fan_in', nonlinearity='relu')\n\n pass\n\n def forward(self, x):\n x = self.input_layer(x)\n x = self.middle_layers(x)\n \n latent_features = self.latent_output(x)\n \n classes_output = self.classes_output(latent_features)\n classes_output = F.log_softmax(classes_output, dim=1)\n \n \n return latent_features, classes_output\n\nclass CompressorTrainDataset(Dataset):\n def __init__(self, x, y, device, stack_count):\n \n self.x = x\n self.y = y\n self.device = device\n self.stack_count = stack_count\n \n pass\n \n def __getitem__(self, id):\n \n if self.stack_count > 1:\n stacked_x = np.vstack([self.x[id] for i in range(self.stack_count)])\n stacked_x = torch.Tensor(stacked_x)\n stacked_x = torch.unsqueeze(stacked_x, dim=0)\n else:\n stacked_x = self.x[id]\n stacked_x = torch.Tensor(stacked_x)\n\n stacked_x = stacked_x.to(self.device)\n \n y = self.y[id]\n y = torch.tensor(y, dtype=torch.int64)\n y = y.to(self.device)\n \n return stacked_x, y\n \n def __len__(self):\n \n data_length = len(self.y)\n \n return data_length\n\nclass CompressorTestDataset(Dataset):\n def __init__(self, x, device, stack_count):\n \n self.x = x\n self.device = device\n self.stack_count = stack_count\n \n pass\n \n def __getitem__(self, id):\n \n if self.stack_count > 1:\n stacked_x = np.vstack([self.x[id] for i in range(self.stack_count)])\n stacked_x = torch.Tensor(stacked_x)\n stacked_x = torch.unsqueeze(stacked_x, dim=0)\n else:\n stacked_x = self.x[id]\n stacked_x = torch.Tensor(stacked_x)\n \n stacked_x = stacked_x.to(self.device)\n \n return stacked_x\n \n def __len__(self):\n \n data_length = len(self.x)\n \n return data_length\n\nclass CenterLossCompressor():\n def __init__(self):\n self.model = None\n self.device = torch.device('cuda')\n\n self.latent_dim = None\n self.n_classes = None\n self.stack_count = None\n\n pass\n\n def fit(self, x, y, validation_part=0.05,\n batch_size=100, epochs=100, latent_dim=100, stack_count=1):\n \n self.n_classes = len(np.unique(y))\n self.latent_dim = latent_dim\n self.stack_count = stack_count\n\n \"\"\"self.model = CenterLossNN(\n x_shape=(len(x), self.stack_count, len(x[0])),\n n_classes=self.n_classes,\n latent_dim=latent_dim)\"\"\"\n \n self.model = MLP_Network(input_dim=len(x[0]), output_dim=self.n_classes, \n hidden_layer_dim=latent_dim, \n 
hidden_layers_num=2, \n                                 dropout_rate=0.05)\n        \n        self.model.to(self.device)\n        \n        self.model.train()\n\n        loss_weight = 1\n        nllloss = nn.CrossEntropyLoss().to(self.device)\n        centerloss = CenterLoss(self.n_classes, self.latent_dim).to(self.device)\n        optimizer4nn = optim.Adam(self.model.parameters(), lr=0.001)\n        optimizer4center = optim.Adam(centerloss.parameters(), lr=0.5)\n        \n        train_dataset = CompressorTrainDataset(x, y, device=\"cuda\", stack_count=stack_count)\n        \n        compressor_batch_size = len(x) // 100 + 2\n        if compressor_batch_size > 256:\n            compressor_batch_size = 256\n        print(\"Compressor batch size: {}\".format(compressor_batch_size))\n        train_dataloader = DataLoader(train_dataset, batch_size=compressor_batch_size, shuffle=True, drop_last=True)\n\n        for epoch in range(epochs):\n            for batch_x, batch_y in tqdm(train_dataloader, desc='CenterLossCompressor fit | Epoch {} of {}'.format(epoch+1, epochs)):\n                ip1, pred = self.model(batch_x)\n                loss = nllloss(pred, batch_y) + loss_weight * centerloss(batch_y, ip1)\n\n                optimizer4nn.zero_grad()\n                optimizer4center.zero_grad()\n                loss.backward()\n                optimizer4nn.step()\n                optimizer4center.step()\n            torch.cuda.empty_cache()\n        pass\n\n    def predict(self, x, batch_size=100):\n        \n        self.model.eval()\n        \n        test_dataset = CompressorTestDataset(x, device=\"cuda\", stack_count=self.stack_count)\n        test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n        features = []\n        for x in test_dataloader:\n            with torch.no_grad():\n                feats, labels = self.model(x)\n                feats = feats.data.cpu().numpy()\n                features.append(feats)\n            batch = x.to('cpu')\n            del batch\n        features = np.vstack(features)\n        torch.cuda.empty_cache()\n\n        return features\n","repo_name":"CameleoGrey/newsvibe_update_service_public_version","sub_path":"src/classes/profittm/CenterLossCompressor.py","file_name":"CenterLossCompressor.py","file_ext":"py","file_size_in_byte":10154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
{"seq_id":"25219982118","text":"\"\"\"\nWrite a function named \"parosParatlan\" that, based on the incoming parameter, prints whether it is even or odd. E.g. like this: This number is ... even!\n\"\"\"\n\n\ndef paros_paratlan(num: int):\n    print('This number is ', 'odd' if num % 2 else 'even', ': ', num, sep='')\n\n\nif __name__ == '__main__':\n    print('We are testing a function that tells whether an integer is odd')\n    aa = int(input('Enter a number:'))\n    paros_paratlan(aa)\n","repo_name":"Petint/python-archhives","sub_path":"pataky/python.tanit.hu/03 Függvények/03-05 Függvény - Páros-páratlan kiírás.py","file_name":"03-05 Függvény - Páros-páratlan kiírás.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"44202303365","text":"import app\nimport unittest\n\nclass MyTestCase(unittest.TestCase):\n    \n    def setUp(self):\n        app.connex_app.app.testing = True\n        self.app = app.connex_app.app.test_client()\n\n    def test_read_empty_movie(self):\n        # test when the input id does not exist in the database\n        result = self.app.get('/api/movie/manage/8912')\n        # Make your assertions\n        self.assertEqual(result.status_code, 404)\n    \n    def test_read_movie_director(self):\n        # test when trying to get the director name from the movie with id 45612\n        result = self.app.get('/api/movie/manage/45612')\n        # Make your assertions\n        response = result.json\n        director = response[\"directors\"][\"name\"]\n        self.assertEqual(director, \"Mel Brooks\")\n    \n    def test_read_director_revenue(self):\n        # test when trying to get the revenue of films made by the director with id 6623\n        result = self.app.get('/api/director/revenue/6623')\n        # Make your assertions\n        response = result.json\n        self.assertEqual(response, 535692636)\n    \n    def test_read_director_department(self):\n        # test when trying to get the department of the director with id 6623\n        result = self.app.get('/api/director/6623')\n        # Make your assertions\n        response = result.json\n        director = response[\"department\"]\n        self.assertEqual(director, \"Directing\")\n    \nif __name__ == '__main__':\n    unittest.main()","repo_name":"matthewsutanto/OCBC-H8-Python","sub_path":"Assignment/Final Project/h8ocbc-milestone1-024/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"551025077","text":"from __future__ import print_function\nfrom pycparser import c_ast, parse_file\nimport minic_ast as mc\nfrom mutils import lmap\n\n\n# In the transformation process, all the nodes will receive an id for\n# hashing purposes. 
This will be used in the analysis.\nsid = 0\n\n\ndef get_new_id():\n global sid\n sid += 1\n return sid\n\n\ndef set_sid(i):\n global sid\n sid = i\n\n\n# Assignments are all converted into assignments using the '=' operator.\n# All assignments using other operators are converted into assignments\n# using the '=' and the expression on the right hand side is a binary\n# expression such that the assignment has the same semantics.\ndef of_assignment(orig):\n lvalue = transform(orig.lvalue)\n if orig.rvalue is not None:\n rvalue = transform(orig.rvalue)\n else:\n rvalue = None\n\n final_rvalue = {\n '=': rvalue,\n '+=': mc.BinaryOp('+', lvalue, rvalue, nid=get_new_id()),\n '-=': mc.BinaryOp('-', lvalue, rvalue, nid=get_new_id()),\n '*=': mc.BinaryOp('*', lvalue, rvalue, nid=get_new_id()),\n '/=': mc.BinaryOp('/', lvalue, rvalue, nid=get_new_id()),\n '%=': mc.BinaryOp('%', lvalue, rvalue, nid=get_new_id()),\n '^=': mc.BinaryOp('^', lvalue, rvalue, nid=get_new_id()),\n '|=': mc.BinaryOp('|', lvalue, rvalue, nid=get_new_id()),\n '>>=': mc.BinaryOp('>>', lvalue, rvalue, nid=get_new_id()),\n '<<=': mc.BinaryOp('<<', lvalue, rvalue, nid=get_new_id()),\n '&=': mc.BinaryOp('&', lvalue, rvalue, nid=get_new_id()),\n '++': mc.BinaryOp('+', lvalue, mc.Constant('int', '1'), nid=get_new_id()),\n '--': mc.BinaryOp('-', lvalue, mc.Constant('int', '1'), nid=get_new_id()),\n }.get(orig.op, mc.EmptyStatement())\n\n return mc.Assignment(lvalue, final_rvalue, coord=orig.coord, nid=get_new_id())\n\n\n# PyCParser represents increment and decrement as unary operations, we convert them\n# to assignments. Other unary operators are kept as is.\ndef maybe_special_unary(orig):\n return {\n 'p--': (lambda x: mc.Assignment(x, mc.BinaryOp('-', x, mc.Constant('int', '1')), nid=get_new_id())),\n 'p++': (lambda x: mc.Assignment(x, mc.BinaryOp('+', x, mc.Constant('int', '1')), nid=get_new_id())),\n '--': (lambda x: mc.Assignment(x, mc.BinaryOp('-', x, mc.Constant('int', '1')), nid=get_new_id())),\n '++': (lambda x: mc.Assignment(x, mc.BinaryOp('+', x, mc.Constant('int', '1')), nid=get_new_id()))\n }.get(orig.op, lambda x: mc.UnaryOp(orig.op, x, coord=orig.coord, nid=get_new_id()))(transform(orig.expr))\n\n\n# Checks that the original construct is a value, a not any another construct. It helps\n# in checking that we have terminal symbols at the right places.\ndef v(orig):\n if isinstance(orig, str) or isinstance(orig, int) or isinstance(orig, float) \\\n or isinstance(orig, bool) or orig is None:\n return orig\n else:\n print(\"Unexpected type for value %r\" % orig)\n raise TypeError\n\n\ndef tmap(x):\n if isinstance(x, list):\n return lmap(transform, x)\n else:\n return transform(x)\n\n\nclass ErrorUnsupportedConstruct(TypeError):\n def __init__(self, construct):\n self.messsage = \"Unsupported construct %s\" % construct\n\n\n# If there is no match case in the dictionary style switch, then it means it is a construct\n# that is not supported in minic.\ndef unsupported(y):\n if y is None:\n return None\n else:\n raise ErrorUnsupportedConstruct(y)\n\n\n# The main transformer function. 
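# NOTE (illustrative, not from the original file): the desugaring done by
# of_assignment above means that for C source such as `x += 1`, transform()
# yields the equivalent of
#
#   mc.Assignment(mc.ID('x'),
#                 mc.BinaryOp('+', mc.ID('x'), mc.Constant('int', '1')),
#                 nid=...)
#
# so downstream analyses only ever have to handle plain '=' assignments.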
This is close to a mapping for PyCparser AST nodes to Minic nodes, except\n# that there are less constructs and we have to transform assignments and unary operators.\ndef transform(x):\n return {\n c_ast.ArrayDecl: (lambda orig: mc.ArrayDecl(transform(orig.type), orig.dim, coord=orig.coord, nid=get_new_id())),\n c_ast.ArrayRef: (lambda orig: mc.ArrayRef(transform(orig.name), transform(orig.subscript), coord=orig.coord, nid=get_new_id())),\n c_ast.Assignment: (lambda orig: of_assignment(orig)),\n c_ast.BinaryOp: (lambda orig: mc.BinaryOp(v(orig.op), transform(orig.left), transform(orig.right),\n coord=orig.coord, nid=get_new_id())),\n c_ast.Compound: (lambda orig: mc.Block(lmap(transform, orig.block_items), coord=orig.coord, nid=get_new_id())),\n c_ast.Constant: (lambda orig: mc.Constant(transform(orig.type), v(orig.value), coord=orig.coord, nid=get_new_id())),\n c_ast.Decl: (lambda orig: mc.Decl(transform(orig.name), transform(orig.funcspec), transform(orig.type),\n transform(orig.init), coord=orig.coord, nid=get_new_id())),\n c_ast.DeclList: (lambda orig: mc.DeclList(tmap(orig.decls), coord=orig.coord, nid =get_new_id())),\n c_ast.DoWhile: (lambda orig: mc.DoWhile(transform(orig.cond), transform(orig.stmt), coord=orig.coord,\n nid=get_new_id())),\n c_ast.EmptyStatement: (lambda orig: mc.EmptyStatement(coord=orig.coord, nid=get_new_id())),\n c_ast.ExprList: (lambda orig: mc.ExprList(tmap(orig.exprs), coord=orig.coord, nid=get_new_id())),\n c_ast.FileAST: (lambda orig: mc.FileAST(lmap(transform, orig.ext), coord=orig.coord, nid=get_new_id())),\n c_ast.For: (lambda orig: mc.For(transform(orig.init), transform(orig.cond), transform(orig.next),\n transform(orig.stmt), coord=orig.coord, nid=get_new_id())),\n c_ast.FuncCall: (lambda orig: mc.FuncCall(transform(orig.name), tmap(orig.args), coord=orig.coord, nid=get_new_id())),\n c_ast.FuncDecl: (lambda orig: mc.FuncDecl(tmap(orig.args), transform(orig.type), coord=orig.coord, nid=get_new_id())),\n c_ast.FuncDef: (lambda orig: mc.FuncDef(transform(orig.decl), tmap(orig.param_decls), transform(orig.body),\n coord=orig.coord, nid=get_new_id())),\n c_ast.ID: (lambda orig: mc.ID(v(orig.name), coord=orig.coord, nid=get_new_id())),\n c_ast.IdentifierType: (lambda orig: mc.IdentifierType(tmap(orig.names), nid=get_new_id())),\n c_ast.If: (lambda orig: mc.If(transform(orig.cond), transform(orig.iftrue), transform(orig.iffalse),\n coord=orig.coord, nid=get_new_id())),\n c_ast.InitList: (lambda orig: mc.InitList(tmap(orig.exprs), coord=orig.coord, nid=get_new_id())),\n c_ast.NamedInitializer: (lambda orig: mc.NamedInitializer(v(orig.name), transform(orig.expr),\n coord=orig.coord, nid=get_new_id())),\n c_ast.ParamList: (lambda orig: mc.ParamList(tmap(orig.params), coord=orig.coord, nid=get_new_id())),\n c_ast.PtrDecl: (lambda orig: mc.PtrDecl(transform(orig.type), coord=orig.coord, nid=get_new_id())),\n c_ast.Return: (lambda orig: mc.Return(transform(orig.expr), coord=orig.coord, nid=get_new_id())),\n c_ast.TernaryOp: (lambda orig: mc.TernaryOp(transform(orig.cond), transform(orig.iftrue),\n transform(orig.iffalse), coord=orig.coord, nid=get_new_id())),\n c_ast.Typename: (lambda orig: mc.Typename(v(orig.name), transform(orig.type), coord=orig.coord, nid=get_new_id())),\n c_ast.TypeDecl: (lambda orig: mc.TypeDecl(v(orig.declname), transform(orig.type), coord=orig.coord, nid=get_new_id())),\n c_ast.UnaryOp: (lambda orig: maybe_special_unary(orig)),\n c_ast.While: (lambda orig: mc.While(transform(orig.cond), transform(orig.stmt), coord=orig.coord, 
nid=get_new_id())),\n str: (lambda orig: orig),\n int: (lambda orig: orig),\n float: (lambda orig: orig),\n list: (lambda orig: tmap(orig)),\n }.get(x.__class__, lambda y: unsupported(y))(x)\n\n\ndef minic_parse_file(filename):\n return transform(parse_file(filename))\n\n\ndef wrap(filename):\n infile = open(filename, 'r')\n outfilename = filename+'_wrap.c'\n outfile = open(outfilename, 'w')\n outfile.write(\"void main(){\\n\")\n for line in infile:\n outfile.write(\"\\t\" + line)\n outfile.write(\"}\\n\")\n outfile.close()\n infile.close()\n return outfilename\n\n\ndef minic_parse_wrap_file(filename):\n return transform(parse_file(wrap(filename)))","repo_name":"LittlePetunia/410p3","sub_path":"pyminic/minic/c_ast_to_minic.py","file_name":"c_ast_to_minic.py","file_ext":"py","file_size_in_byte":8258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"25995716152","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nHere we talk about reading and writing to files\r\n\r\n@author: manugarri\r\n\"\"\"\r\n\r\n#%%\r\n\r\n\"\"\"\r\n******************************************************************************\r\nFolder creation\r\n******************************************************************************\r\n\"\"\"\r\n\r\n# we can create folders with os.makedirs()\r\nimport os\r\nos.makedirs(\"./data/names/\", exist_ok=True)\r\n#%%\r\n\"\"\"\r\n******************************************************************************\r\nLISTING FILES\r\n******************************************************************************\r\n\"\"\"\r\nfiles_in_current_folder = os.listdir(\".\")\r\nprint(files_in_current_folder)\r\n\r\n\r\n#%%\r\n\"\"\"\r\n*******************************************************************************\r\nWRITING TO FILES\r\n*******************************************************************************\r\n\"\"\"\r\n#%%\r\n# We can use `open` to open files. If the file does not exist it will throw an error\r\nnonexisten_file = open(\"./data/names/users.txt\")\r\n\r\n#%%\r\n# If we want to write to a file (create it) we must specify we want to open the file in write mode\r\nwriting_file = open(\"./data/names/users.txt\", \"w\")\r\nwriting_file.write(\"James\")\r\nwriting_file.write(\"Mary\")\r\n#%%\r\n# Nothing is actually written until we close the file\r\nwriting_file.close()\r\n#%%\r\n# If we open a file in \"w\" mode again, it will delete the existing content and create a blank file\r\nwriting_file = open(\"./data/names/users.txt\", \"w\")\r\nwriting_file.write(\"Michael\")\r\nwriting_file.write(\"Adam\")\r\nwriting_file.close()\r\n\r\n#%%\r\n# We can open a file in append mode with \"a\". 
# This way we won't erase existing \r\n# content and will add to the end of the file\r\nappend_file = open(\"./data/names/users.txt\", \"a\")\r\nappend_file.write(\"James\")\r\nappend_file.write(\"Mary\")\r\nappend_file.close()\r\n\r\n\r\n#%%\r\n\"\"\"\r\nUsually we don't want to use the open method like we did, because if any error\r\nhappens between opening and closing the file we can lose the file\r\n\r\nThe recommended way of reading and writing to files is via a context manager\r\n\"\"\"\r\nusers = [\"Manuel\", \"Anthony\", \"John\", \"Mary\"]\r\n# we open a context with the keyword `with`\r\nwith open(\"./data/names/users.txt\", \"w\") as file_name:\r\n    for user in users:\r\n        file_name.write(user)\r\n#%%\r\n# So far we have been writing all the names one next to each other, if we want to \r\n# put each one on its own line, we need to write a newline symbol \"\\\\n\" after each one\r\n\r\nusers = [\"Manuel\", \"Anthony\", \"John\", \"Mary\"]\r\n\r\nwith open(\"./data/names/users.txt\", \"w\") as file_name:\r\n    for user in users:\r\n        file_name.write(user)\r\n        file_name.write(\"\\\\n\")\r\n#%%\r\n\r\n\"\"\"\r\n*******************************************************************************\r\nREADING FROM FILES\r\n*******************************************************************************\r\n\"\"\"\r\n\r\nwith open(\"./data/names/users.txt\") as fname:\r\n    data = fname.read()\r\nprint(data)\r\n\r\ntype(data)\r\n#%%\r\n\r\n# the method `read()` reads the whole file as text (one giant string).\r\n# If we want to separate the lines into different strings we can use the method\r\n# readlines()\r\nread_users = []\r\n\r\nwith open(\"./data/names/users.txt\") as fname:\r\n    lines = fname.readlines()\r\n    print(lines)\r\n    for line in lines:\r\n        read_users.append(line.strip(\"\\\\n\"))\r\nprint(read_users)\r\n\r\ntype(read_users)\r\n#%%\r\n\"\"\"\r\n*******************************************************************************\r\nUSING PATHLIB\r\n*******************************************************************************\r\n\r\nIn Windows the folder structure is defined with \\\\ while in linux/mac we use /\r\nThis can lead to problems\r\n\r\nOne way to avoid these kinds of issues is to use pathlib, a cross-platform library\r\nfor dealing with files\r\n\"\"\"\r\n\r\nfrom pathlib import Path\r\n\r\nfolder = Path(\"./data/names/\")\r\n\r\nusers_file = folder / \"users.txt\"\r\nprint(type(users_file)) # PosixPath or WindowsPath\r\n# we don't need a context manager with pathlib\r\nusers_file.read_text()\r\n#%%\r\n# we can easily write to a file using pathlib\r\nfolder = Path(\"./data/names/\")\r\n\r\nfile2 = folder / \"users_2.txt\"\r\n\r\nfile2.write_text(\"hello!\")\r\n#%%\r\nprint(file2.read_text())\r\n\r\n# Pathlib does not support append mode yet!\r\n\"\"\"\r\n\r\nExercises\r\n\r\n\"\"\"\r\n#%%\r\n\"\"\"\r\nCreate a function that, given a filename, reads it and returns the longest line\r\n\r\n\"\"\"\r\n\r\n#%% \r\n\"\"\"\r\nCreate a function that takes a filename and an integer as arguments, reads the file\r\nand returns the last n lines\r\n\"\"\"\r\n\r\n\r\n#%%\r\n\"\"\"\r\nCreate a function that takes a dictionary and a filename as arguments, and writes \r\nthe dictionary as a csv file with the specified filename.\r\n\r\nCSV files (Comma-Separated Values) are a data format to store data, where each element\r\nis a row, and each field is separated by a comma\r\n\r\nFor example, given the dictionary\r\n{\r\n\"name\": [\"Antonio\", \"Miguel\", \"Julian\", \"Andrew\"],\r\n\"age\": [45, 40, 22, 34],\r\n\"city\": 
[\"Murcia\", \"Lisbon\", \"Barcelona\", \"Madrid\"]\r\n}\r\n\r\nand a filename , `people.csv`,\r\n\r\nThis function would save such dictionary as a file named `people.csv` with this format:\r\n\r\n name,age,city\r\n Antonio,45,Murcia\r\n Miguel,40,Lisbon,\r\n Julian,22,Barcelona\r\n Andrew,34,Madrid\r\n\"\"\"\r\n","repo_name":"letcosta/ix_lg","sub_path":"Python Packages/intro_to_python/9.input_output.py","file_name":"9.input_output.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12186660427","text":"import equinox as eqx\nimport gymnasium as gym\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport optax\nimport tensorflow_probability.substrates.jax as tfp\nfrom jaxtyping import Array, Float32, PRNGKeyArray, PyTree\nimport torch\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom equinox_rl.common import eqx_helpers, gym_helpers, rl_helpers\nimport matplotlib.pyplot as plt\n\n\nclass Policy(eqx.Module):\n \"\"\"Policy network for the policy gradient algorithm in a discrete action space.\"\"\"\n\n layers: list\n\n def __init__(self, layers: list[int], key: PRNGKeyArray) -> None:\n \"\"\"Initialize the policy network.\n Args:\n layers: The layers of the policy network.\n key: The key to use for initialization.\n \"\"\"\n self.layers = []\n\n for i in range(len(layers) - 1):\n key, subkey = jax.random.split(key)\n self.layers.append(eqx.nn.Linear(layers[i], layers[i + 1], key=subkey))\n del subkey\n\n # Add a ReLU activation function to all but the last layer.\n if i < len(layers) - 2:\n self.layers.append(jax.nn.relu)\n\n def __call__(self, x: Float32[Array, \"state_dims\"]) -> Array:\n \"\"\"Forward pass of the policy network.\n Args:\n x: The input to the policy network.\n Returns:\n The output of the policy network.\n \"\"\"\n for layer in self.layers:\n x = layer(x)\n return x\n\n\n@eqx.filter_jit\ndef get_action(state: Float32[Array, \"state_dims\"], policy: Policy, key: PRNGKeyArray):\n \"\"\"Get an action from the policy network.\n Args:\n state: The state to get an action from.\n policy: The policy network.\n key: The key to use for sampling.\n Returns:\n The action sampled from the policy network.\n \"\"\"\n key, subkey = jax.random.split(key)\n logits = policy(state)\n action = tfp.distributions.Categorical(logits=logits).sample(seed=subkey)\n\n return action\n\n\ndef loss_fn(\n policy: PyTree,\n states: Float32[Array, \"n_steps state_dims\"],\n actions: Float32[Array, \"n_steps\"],\n rewards: Float32[Array, \"n_steps\"],\n dones: Float32[Array, \"n_steps\"],\n gamma: float,\n) -> Array:\n logits = eqx.filter_vmap(policy)(states)\n advantages = rl_helpers.get_total_discounted_rewards(rewards, gamma)\n\n loss = rl_helpers.get_policy_gradient_discrete_loss(logits, actions, advantages)\n return loss\n\n\n@eqx.filter_jit\ndef step(\n states: Float32[Array, \"n_steps state_dims\"],\n actions: Float32[Array, \"n_steps\"],\n rewards: Float32[Array, \"n_steps\"],\n dones: Float32[Array, \"n_steps\"],\n gamma: float,\n optimizer: optax.GradientTransformation,\n opt_state: PyTree,\n policy: PyTree,\n):\n loss, grads = eqx.filter_value_and_grad(loss_fn)(\n policy, states, actions, rewards, dones, gamma\n )\n updates, opt_state = optimizer.update(grads, opt_state, policy)\n policy = eqx.apply_updates(policy, updates)\n\n return policy, opt_state, loss\n\n\ndef train(\n states: Float32[Array, \"n_steps state_dims\"],\n actions: Float32[Array, \"n_steps\"],\n 
rewards: Float32[Array, \"n_steps\"],\n dones: Float32[Array, \"n_steps\"],\n gamma: float,\n policy: PyTree,\n opt_state: PyTree,\n optimizer: optax.GradientTransformation,\n) -> tuple[PyTree, PyTree, np.float32]:\n \"\"\"Train the policy network.\"\"\"\n\n # Convert jax arrays to numpy arrays for the dataloader\n states = np.array(states) # type: ignore\n actions = np.array(actions) # type: ignore\n rewards = np.array(rewards) # type: ignore\n dones = np.array(dones) # type: ignore\n dataset = gym_helpers.RLDataset(states, actions, rewards, dones)\n dataloader = DataLoader(dataset, batch_size=8, shuffle=False, drop_last=True)\n\n losses = []\n\n for batch in dataloader:\n b_states, b_actions, b_rewards, b_dones = batch\n\n b_states = jnp.array(b_states.numpy())\n b_actions = jnp.array(b_actions.numpy())\n b_rewards = jnp.array(b_rewards.numpy())\n b_dones = jnp.array(b_dones.numpy())\n\n policy, opt_state, loss = step(\n b_states, b_actions, b_rewards, b_dones, gamma, optimizer, opt_state, policy\n )\n\n losses.append(loss)\n\n return policy, opt_state, np.mean(losses)\n\n\ndef experiment(hyperparams: dict):\n \"\"\"Run the experiment.\"\"\"\n env_name = hyperparams[\"env_name\"]\n learning_rate = hyperparams[\"learning_rate\"]\n gamma = hyperparams[\"gamma\"]\n layers = hyperparams[\"layers\"]\n n_episodes = hyperparams[\"n_episodes\"]\n\n policy = Policy(layers, key=jax.random.PRNGKey(0))\n optimizer = optax.adamw(learning_rate)\n opt_state = eqx_helpers.eqx_init_optimiser(optimizer, policy)\n\n key = jax.random.PRNGKey(42)\n env = gym.make(env_name)\n\n losses = []\n all_rewards = []\n for i in tqdm(range(n_episodes)):\n key, subkey = jax.random.split(key)\n\n states, actions, rewards, dones = gym_helpers.rollout_discrete(\n env, get_action, {\"policy\": policy}, key=subkey\n )\n\n del subkey\n\n all_rewards.append(np.sum(rewards))\n\n policy, opt_state, loss = train(\n states, actions, rewards, dones, gamma, policy, opt_state, optimizer\n )\n\n losses.append(loss)\n\n if (i % 100 == 0 and i >= 100) or i == n_episodes - 1:\n print(\n f\"Episode {i} | R: {gym_helpers.moving_average(all_rewards, 100)[-1]}\"\n )\n print(f\"Episode {i} | L: {gym_helpers.moving_average(losses, 100)[-1]}\")\n\n mean_reward = np.mean(all_rewards)\n\n return mean_reward\n\n\ndef main() -> None:\n \"\"\"Run the experiment.\"\"\"\n env = gym.make(\"LunarLander-v2\")\n if env.observation_space.shape is None:\n raise ValueError(\"Observation space shape is None\")\n if env.action_space.n is None: # type: ignore\n raise ValueError(\"Action space n is None\")\n\n state_dims = env.observation_space.shape[0]\n n_actions = env.action_space.n # type: ignore\n\n hyperparams = {\n \"env_name\": \"LunarLander-v2\",\n \"learning_rate\": 1e-4,\n \"gamma\": 0.99,\n \"layers\": [state_dims, 256, 128, 64, n_actions],\n \"n_episodes\": 10000,\n }\n\n experiment(hyperparams)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Artur-Galstyan/equinox-rl","sub_path":"equinox_rl/policy_gradient.py","file_name":"policy_gradient.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39838945737","text":"#######################################################################################################################\n# flexible MLP model constructed using PyTorch and the PyTorch Lightning Wrapper Tool\n#######################################################################################################################\n# 
Problem with Tensorboard (officially available only with pytorch 1.3\n# - add function hparams to torch/utils/tensorboard/summary.py\n# - remove version check in pytorch_lightning/logggers/tensorboard.py\n# idea define own logger where all code is copied and just those changes implemented\n# access saved data by script and execute plotting: https://www.tensorflow.org/tensorboard/dataframe_api\n\n# import packages\nimport torch\nimport yaml\nfrom argparse import Namespace\nfrom typing import Optional\n\nfrom thunder_torch.models.ModelBase import LightningModelBase\nfrom thunder_torch.utils.option_class import OptionClass\nfrom thunder_torch import _modules_activation, _modules_loss, _modules_lr_scheduler, _modules_optim\nimport thunder_torch as tt\n\n\n# flexible MLP class\nclass LightningFlexMLP(LightningModelBase):\n \"\"\"\n Create flexMLP as PyTorch LightningModule\n\n Hyperparameters of the model\n ----------------------------\n - n_inp: int Input dimension (required)\n - n_out: int Output dimension (required)\n - hidden_layer: list List of hidden layers with number of hidden neurons as layer entry (required)\n - activation: str activation fkt that is included in torch.nn (default: ReLU)\n - loss: str loss fkt that is included in torch.nn (default: MSELoss)\n - optimizer: dict dict including optimizer fkt type and possible parameters, optimizer has to be\n included in torch.optim (default: {'type': Adam, 'params': {'lr': 1e-3}})\n - scheduler: dict dict including execute flag, scheduler fkt type and possible parameters,\n scheduler\n has to be included in torch.optim.lr_scheduler (default: {'execute': False})\n - num_workers: int number of workers in DataLoaders (default: 10)\n - batch: int batch size of DataLoaders (default: 64)\n - output_activation: str activation fkt (default: False)\n \"\"\"\n\n def __init__(self, hparams: Namespace) -> None:\n \"\"\"\n Initializes a flexMLP model based on the provided parameters\n\n Parameters\n ----------\n hparams - Namespace object including hyperparameters\n \"\"\"\n super().__init__()\n\n self.hparams = hparams\n self.check_hparams()\n self.get_default()\n self.get_functions()\n self.min_val_loss: Optional[torch.Tensor] = None\n\n # Construct MLP with a variable number of hidden layers\n self.layers_list = []\n self.construct_mlp(self.hparams.n_inp, self.hparams.hidden_layer, self.hparams.n_out)\n\n if hasattr(self.hparams, 'output_activation'):\n self.layers_list.append(getattr(torch.nn, self.hparams.output_activation)())\n\n self.layers = torch.nn.Sequential(*self.layers_list)\n\n # def training_step(self, batch, batch_idx) -> dict:\n # x, y = batch\n # y_hat = self(x)\n # loss = self.loss_fn(y_hat, y)\n # log = {'train_loss': loss}\n # results = {'loss': loss, 'log': log}\n # return results\n #\n # def validation_step(self, batch, batch_idx) -> dict:\n # x, y = batch\n # y_hat = self(x)\n # val_loss = self.loss_fn(y_hat, y)\n # return {'val_loss': val_loss}\n #\n # def validation_epoch_end(self, outputs) -> dict:\n # val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n # if self.current_epoch == 0: self.min_val_loss = val_loss\n # if val_loss < self.min_val_loss:\n # self.min_val_loss = val_loss\n # log = {'avg_val_loss': val_loss}\n # pbar = {'val_loss': val_loss, 'min_val_loss': self.min_val_loss}\n # results = {'log': log, 'val_loss': val_loss, 'progress_bar': pbar}\n # return results\n #\n # def test_step(self, batch, batch_idx) -> dict:\n # x, y = batch\n # y_hat = self(x)\n # loss = self.loss_fn(y_hat, y)\n # return 
{'test_loss': loss}\n #\n # def test_epoch_end(self, outputs) -> dict:\n # test_loss = torch.stack([x['test_loss'] for x in outputs]).mean()\n # log = {'avg_test_loss': test_loss}\n # results = {'log': log, 'test_loss': test_loss}\n # return results\n\n # # def add_model_specific_args(parent_parser):\n # # parser = argparse.ArgumentParser(parents=[parent_parser])\n # # parser.add_argument('--features', type=list, default=['pode', 'Z', 'H', 'PV'])\n # # parser.add_argument('--labels', type=list, default=['T'])\n # # parser.add_argument('--n_hidden_neurons', nargs='+', type=int, default=[64, 64, 64])\n # # return parser\n\n @staticmethod\n def get_OptionClass() -> dict:\n options = {'hparams': OptionClass(template=LightningFlexMLP.yaml_template(['Model', 'params']))}\n options['hparams'].add_key('n_inp', dtype=int, required=True)\n options['hparams'].add_key('n_out', dtype=int, required=True)\n options['hparams'].add_key('hidden_layer', dtype=list, required=True)\n options['hparams'].add_key('output_activation', dtype=str, attr_of=_modules_activation)\n options['hparams'].add_key('activation', dtype=str, attr_of=_modules_activation)\n options['hparams'].add_key('loss', dtype=str, attr_of=_modules_loss)\n options['hparams'].add_key('optimizer', dtype=dict)\n options['hparams'].add_key('scheduler', dtype=dict)\n options['hparams'].add_key('num_workers', dtype=int)\n options['hparams'].add_key('batch', dtype=int)\n options['hparams'].add_key('lparams', dtype=Namespace)\n options['hparams'].add_key('lr', dtype=float)\n\n options['optimizer'] = OptionClass(template=LightningFlexMLP.yaml_template(['Model', 'params', 'optimizer']))\n options['optimizer'].add_key('type', dtype=str, attr_of=_modules_optim)\n options['optimizer'].add_key('params', dtype=dict, param_dict=True)\n\n options['scheduler'] = OptionClass(template=LightningFlexMLP.yaml_template(['Model', 'params', 'scheduler']))\n options['scheduler'].add_key('execute', dtype=bool)\n options['scheduler'].add_key('type', dtype=str, attr_of=_modules_lr_scheduler)\n options['scheduler'].add_key('params', dtype=dict, param_dict=True)\n\n return options\n\n @staticmethod\n def yaml_template(key_list: list) -> str:\n \"\"\"\n Yaml template for LightningFlexMLP\n \"\"\"\n template = {'Model': {'type': 'LightningFlexMLP',\n 'load_model': {'path': 'name.ckpt'},\n 'create_model': {'n_inp': 'int', 'n_out': 'int', 'hidden_layer': '[int, int, int]',\n 'output_activation': 'str (default: None)', 'activation':\n 'str (default: ReLU)'},\n 'params': {'loss': 'str (default:MSELoss)',\n 'optimizer': {'type': 'str (default: Adam)',\n 'params': {'lr': 'float (default: 1.e-3'}},\n 'scheduler': {'execute': ' bool (default: False)', 'type': 'name',\n 'params': {'cooldown': 'int', 'patience': 'int',\n 'min_lr': 'float'}},\n 'num_workers': 'int (default: 10)', 'batch': 'int (default: 64)'}}}\n\n template = tt.utils.get_by_path(template, key_list)\n\n return yaml.dump(template, sort_keys=False)\n","repo_name":"pascal-roth/thundertorch","sub_path":"thunder_torch/models/LightningFlexMLP.py","file_name":"LightningFlexMLP.py","file_ext":"py","file_size_in_byte":8104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35344926823","text":"import wx\n\nclass TestFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, title=\"Frame\")\n dlg = wx.Dialog(self, title=\"Dialog\", style=wx.DEFAULT_DIALOG_STYLE | wx.FRAME_FLOAT_ON_PARENT)\n self.Show()\n dlg.Show()\n\napp = wx.PySimpleApp()\nframe = 
TestFrame()\napp.MainLoop()\n","repo_name":"wxWidgets/trac-attachments","sub_path":"ticket/177/177381520b4ea8c97c6000c0bcc3391b1839e450/2022bcc13c5500a97edc06f3589b5f8976c7e1d0.py","file_name":"2022bcc13c5500a97edc06f3589b5f8976c7e1d0.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"38976480813","text":"from flask import Flask, render_template, request\nfrom flask import redirect, jsonify, url_for, flash\nfrom sqlalchemy import create_engine, asc\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Category, Book, User\nfrom flask import session as login_session\n\nimport requests\nimport json\nimport random\nimport string\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nfrom flask import make_response\nfrom flask_httpauth import HTTPBasicAuth\n\nCLIENT_ID = json.loads(open('client_secrets.json', 'r')\n .read())['web']['client_id']\n\napp = Flask(__name__)\napp.secret_key = 'super secret key'\n\n# connect to database\nengine = create_engine('sqlite:///category.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\nauth = HTTPBasicAuth()\ng = User()\n\n\n@auth.verify_password\ndef verify_password(username, password):\n \"\"\"\n Method to verify password from database when using local authentication.\n :param username: Value to lookup.\n :param password: Value to verify.\n :return: Boolean\n \"\"\"\n user = session.query(User).filter_by(username=username).first()\n if not user or not user.verify_password(password):\n return False\n g.user = user\n return True\n\n\n@app.route('/users', methods=['POST'])\ndef new_user():\n \"\"\"\n Method to create a new user if not found.\n :return: JSON object with username.\n \"\"\"\n username = request.json.get('username')\n password = request.json.get('password')\n name = request.json.get('name')\n email = request.json.get('email')\n if username is None or password is None:\n abort(400)\n if session.query(User).filter_by(username=username).first() is not None:\n abort(400)\n user = User(username=username, name=name, email=email)\n user.hash_password(password)\n session.add(user)\n session.commit()\n return jsonify({'username': user.username}), 201\n\n\n@app.route('/protected_resource')\n@auth.login_required\ndef get_resource():\n \"\"\"\n Method to test local authentication.\n :return: JSON object with message/username.\n \"\"\"\n return jsonify({'data': 'Hello, %s!' 
% g.user.username})\n\n\n@app.route('/login')\ndef login():\n \"\"\"\n Method to create a random string for authentication.\n :return: Returns the login.html page and state set with a random string.\n \"\"\"\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)\n\n\n@app.route('/clearSession')\ndef clear_session():\n \"\"\"\n Method to clear the login session for local testing.\n :return: String message.\n \"\"\"\n login_session.clear()\n return \"Session cleared\"\n\n\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n \"\"\"\n Udacity method to enable google authentication.\n :return: Welcome page upon success and redirected to the home page.\n \"\"\"\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print(\"Token's client ID does not match app's.\")\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(json.dumps('Current user is already '\n 'connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n # check if user exists otherwise create a new user\n user_email = 
session.query(User)\\\n .filter_by(email=login_session['email']).one_or_none()\n\n login_session['user_id'] = user_email.id\n\n if not user_email:\n user = User(username=login_session['username'],\n picture=login_session['picture'],\n email=login_session['email'])\n session.add(user)\n session.commit()\n\n flash('Created %s user.' % user.email)\n login_session['id'] = session.query(User)\\\n .filter_by(email=user.email).one_or_none()\n\n output = ''\n output += '
<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>
'\n output += ''\n flash(\"you are now logged in as %s\" % login_session['username'])\n print(\"done!\")\n return output\n\n\n@app.route('/gdisconnect')\ndef gdisconnect():\n \"\"\"\n Udacity method to disconnect from your google account\n :return: Redirect to homepage without being logged in otherwise a\n failure message.\n \"\"\"\n access_token = login_session.get('access_token')\n if access_token is None:\n print('Access Token is None')\n response = make_response(json.dumps('Current user not connected.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n # return response\n return redirect(url_for('show_categories'))\n print('In gdisconnect access token is %s', access_token)\n print('User name is: ')\n print(login_session['username'])\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print('result is ')\n print(result)\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n # return response\n return redirect(url_for('show_categories'))\n else:\n response = make_response(json.dumps('Failed to revoke token for '\n 'given user.'), 400)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n\n@app.route('/', methods=['GET'])\ndef show_categories():\n \"\"\"\n Method to show all existing categories.\n :return: List of categories and a redirect to home.html.\n \"\"\"\n categories = session.query(Category).order_by(Category.name).all()\n return render_template('home.html', categories=categories)\n\n\n@app.route('/category/create/', methods=['GET', 'POST'])\ndef create_category():\n \"\"\"\n Method to create a new category.\n :return: Redirect to show categories on a POST, otherwise a redirect\n to category_modify.html.\n \"\"\"\n if request.method == 'POST':\n category = Category(name=request.form['name'],\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(category)\n session.commit()\n flash('Created %s category.' % category.name)\n return redirect(url_for('show_categories'))\n else:\n return render_template('category_modify.html')\n\n\n@app.route('/category//update', methods=['GET', 'POST'])\ndef update_category(category_id):\n \"\"\"\n Method to update a category.\n :param category_id: Category id to be updated.\n :return: Show the category and books on a POST, otherwise a redirect\n to category_modify.html.\n \"\"\"\n category = session.query(Category).filter_by(id=category_id).one()\n if category.user_id != login_session['user_id']:\n flash('''You are not authorized to edit this item.\n Create your own item to edit.''')\n return redirect(url_for('show_category_books',\n category_id=category_id))\n if request.method == 'POST':\n if request.form['name']:\n category.name = request.form['name']\n if request.form['description']:\n category.description = request.form['description']\n flash('Updated %s category.' 
% category.name)\n return redirect(url_for('show_category_books',\n category_id=category_id))\n else:\n return render_template('category_modify.html', category_id=category_id,\n category=category)\n\n\n@app.route('/category//delete', methods=['GET', 'POST'])\ndef delete_category(category_id):\n \"\"\"\n Method to delete a category and any associated books.\n :param category_id: Category id to be deleted.\n :return: Redirect to the homepage on a POST, otherwise a redirect\n to category_delete.html.\n \"\"\"\n category = session.query(Category).filter_by(id=category_id).one()\n books = session.query(Book).filter_by(category_id=category_id).all()\n if category.user_id != login_session['user_id']:\n flash('''You are not authorized to edit this item.\n Create your own item to edit.''')\n return redirect(url_for('show_category_books',\n category_id=category_id))\n\n if request.method == 'POST':\n if category is not None:\n for book in books:\n session.delete(book)\n flash('Deleted %s book..' % book.name)\n session.commit()\n session.delete(category)\n flash('Deleted %s category.' % category.name)\n session.commit()\n return redirect(url_for('show_categories'))\n else:\n return render_template('category_delete.html', category=category,\n books=books)\n\n\n@app.route('/category//book', methods=['GET'])\ndef show_category_books(category_id):\n \"\"\"\n Method to show a category and associated books.\n :param category_id: Category id to be displayed.\n :return: Redirect to the category/books page on a POST, otherwise a\n redirect to book_modify.html.\n \"\"\"\n category = session.query(Category).filter_by(id=category_id).first()\n books = session.query(Book).filter_by(category_id=category_id)\\\n .order_by(Book.name).all()\n return render_template('category_books.html', category=category,\n books=books)\n\n\n@app.route('/category//book/create', methods=['GET', 'POST'])\ndef create_book(category_id):\n \"\"\"\n Mehtod to create a new book in a category.\n :param category_id: Category id the book should be linked too.\n :return: Redirect to show the category and books on a POST, otherwise\n a redirect to book_modify.html.\n \"\"\"\n category = session.query(Category).filter_by(id=category_id).first()\n if request.method == 'POST':\n book = Book(name=request.form['name'], author=request.form['author'],\n category_id=category_id, user_id=login_session['user_id'])\n session.add(book)\n session.commit()\n flash('Created %s book.' % book.name)\n return redirect(url_for('show_category_books',\n category_id=category_id))\n else:\n return render_template('book_modify.html', category=category)\n\n\n@app.route('/category//book//update',\n methods=['GET', 'POST'])\ndef update_book(category_id, book_id):\n \"\"\"\n Method to updatte a book.\n :param category_id: Category id the book is associated too.\n :param book_id: Book id to be updated.\n :return: Redirect to show the category/books on a POST, otherwise a\n redirect to book_modify.html.\n \"\"\"\n category = session.query(Category).filter_by(id=category_id).one()\n book = session.query(Book).filter_by(id=book_id).one()\n\n if book.user_id != login_session['user_id']:\n flash('''You are not authorized to edit this item.\n Create your own item to edit.''')\n return redirect(url_for('show_category_books',\n category_id=category_id))\n\n if request.method == 'POST':\n if request.form['name']:\n book.name = request.form['name']\n if request.form['author']:\n book.author = request.form['author']\n session.add(book)\n session.commit()\n flash('Updated %s book.' 
% book.name)\n return redirect(url_for('show_category_books',\n category_id=category_id))\n else:\n return render_template('book_modify.html', category=category,\n book=book)\n\n\n@app.route('/category//book//delete',\n methods=['GET', 'POST'])\ndef delete_book(category_id, book_id):\n \"\"\"\n Method to delete a book.\n :param category_id: Category id the book is associated to.\n :param book_id: Book id to be deleted.\n :return: Redirect to show the category/books.\n \"\"\"\n book = session.query(Book).filter_by(id=book_id).one()\n\n if book.user_id != login_session['user_id']:\n flash('''You are not authorized to edit this item.\n Create your own item to edit.''')\n return redirect(url_for('show_category_books',\n category_id=category_id))\n\n if request.method == 'POST':\n session.delete(book)\n session.commit()\n flash('Deleted %s book.' % book.name)\n return redirect(url_for('show_category_books', category_id=category_id))\n\n\n@app.route('/categories/api')\ndef api_categories():\n \"\"\"\n Method to display all the categories.\n :return: JSON list of categories.\n \"\"\"\n categories = session.query(Category).order_by(Category.name).all()\n categories_json = [c.serialize for c in categories]\n return jsonify(Category=categories_json)\n\n\n@app.route('/books/api')\ndef api_books():\n \"\"\"\n Method to display all the books.\n :return: JSON list of all books.\n \"\"\"\n books = session.query(Book).order_by(Book.name).all()\n books_json = [i.serialize for i in books]\n return jsonify(Book=books_json)\n\n\n@app.route('/category//book//api')\ndef api_category_books(category_id, book_id):\n \"\"\"\n Method to display a category and associated books.\n :return: JSON list of categories/books.\n \"\"\"\n category = session.query(Category).filter_by(id=category_id).one_or_none()\n if category is not None:\n category_json = category.serialize\n if len(category_json) != 0:\n book = session.query(Book)\\\n .filter_by(category_id=category_id, id=book_id).one_or_none()\n if book is not None:\n book_json = book.serialize\n\n if len(book_json) != 0:\n category_json[\"Books\"] = book_json\n else:\n category_json = {}\n return jsonify(Category=category_json)\n\n\n@app.route('/category/books/api')\ndef api_all_category_books():\n \"\"\"\n Method to display all categories and associated books.\n :return: JSON list of categories/books.\n \"\"\"\n categories = session.query(Category).order_by(Category.name).all()\n categories_json = [c.serialize for c in categories]\n for c in range(len(categories_json)):\n books = session.query(Book)\\\n .filter_by(category_id=categories_json[c]['id']).all()\n books_json = [i.serialize for i in books]\n\n if len(books_json) != 0:\n categories_json[c][\"Books\"] = books_json\n return jsonify(Category=categories_json)\n\n\n@app.route('/users/api')\ndef api_users():\n \"\"\"\n Method to display all users.\n :return: JSON list of all users.\n \"\"\"\n users = session.query(User).order_by(User.email).all()\n users_json = [i.serialize for i in users]\n return jsonify(User=users_json)\n\n# todo Code is ready for personal review and neatly formatted and\n# compliant with the Python PEP 8 style guide.\n# todo README file includes details of all the steps required to\n# successfully run the application.\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5000, 
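# threaded=False is likely deliberate here: the module-level SQLAlchemy session shared by all handlers is not thread-safe\n            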
threaded=False)\n","repo_name":"aandersland/udacity-full-stack-web-developer-nanodegree","sub_path":"catalog_app/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":18455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21093554495","text":"from collections import Counter\n\nclass Solution(object):\n def equalFrequency(self, word):\n \"\"\"\n :type word: str\n :rtype: bool\n \"\"\"\n #x = Counter(word)):\n for i in range(len(word)):\n if len(set(Counter(word[:i]+word[i+1:]).values()))==1:\n return True\n return False","repo_name":"omkarkavi/leetcode-solutions","sub_path":"remove-letter-to-equlize-frequency.py","file_name":"remove-letter-to-equlize-frequency.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29131395452","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport cv2\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom tqdm import tqdm\nimport csv\nimport h5py\nimport sklearn.metrics as metrics\n\nfrom tensorflow.keras.callbacks import ModelCheckpoint,CSVLogger,LearningRateScheduler\nfrom tensorflow.keras.models import Model,Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import LSTM, Bidirectional\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import TimeDistributed\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import add\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras import optimizers\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nfrom tensorflow.keras import backend as K\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\ndata_dir = r'/home/boonyew/Documents/ICVL/Training/'\nval_dir = r'/home/boonyew/Documents/ICVL/Testing/'\n\ncenter_file = os.path.join(data_dir,'center_train_refined.txt')\nlabel_file = os.path.join(data_dir,'labels.txt')\n\ntest_center_file = os.path.join(val_dir,'center_test_refined.txt')\ntest_label_file = os.path.join(val_dir,'test_seq_1.txt')\n#data_dir = r'C:\\Users\\angbo\\Documents\\MTech ISS\\Capstone\\HandJointRehab\\ICVL'\n\nfx = 240.99\nfy = 240.96\nu0 = 160\nv0 = 120\n\nheight=240\nwidth=320\nkeypointsNumber = 16\ncropWidth = 120\ncropHeight = 120\nbatch_size = 8\nxy_thres = 95\ndepth_thres = 150\n\ndef pixel2world(x, fx, fy, ux, uy):\n \"\"\"\n Converts coordinates from Image coordinates (xyz) to World coordinates (uvd)\n \n \"\"\"\n\n x[:, 0] = (x[:, 0] - ux) * x[:, 2] / fx\n x[:, 1] = (x[:, 1] - uy) * x[:, 2] / fy\n return x\n\ndef world2pixel(x, fx, fy, ux, uy):\n \"\"\"\n Converts coordinates from World coordinates (uvd) to Image coordinates (xyz) \n \n \"\"\"\n x[:, 0] = x[:, 0] * fx / x[:, 2] + ux\n x[:, 1] = x[:, 1] * fy / x[:, 2] + uy\n return x\n\ndef loadAnnotations(filename,centers,val=False):\n f = open(filename,'r')\n fc = open(centers,'r')\n files = {}\n labels = {}\n for idx,line in enumerate(f):\n if line.rstrip().split(' ') != ['']:\n center = 
next(fc)\n # print(center)\n if center.split(' ')[0] == 'invalid':\n pass\n \n else:\n joints = line.rstrip().split(' ')\n filename = joints[0]\n temp = filename.split('/')\n# print(temp)\n if len(temp) > 2:\n sub,seq,img = temp\n else:\n sub = '200'\n seq,img = temp\n # joints[-1] = joints[-1].rstrip()\n # print(joints)\n temp = np.reshape(np.array(joints[1:],dtype='float32'),(keypointsNumber,3))\n # temp[:,2] = -temp[:,2]\n # temp = world2pixel(temp,241.42,241.42,160,120)\n # temp = joints3DToImg(temp)\n center = center.split(' ')\n center[2] = center[2].rstrip()\n center = np.array(center,dtype='float32')\n labels.setdefault(sub,{})\n labels[sub].setdefault(seq,{})\n labels[sub][seq][img] = {}\n labels[sub][seq][img]['labels'] = temp\n labels[sub][seq][img]['center'] = center\n labels[sub][seq][img]['bbox'] = get_bbox(center.reshape((1,3)))\n files.setdefault(sub,{})\n files[sub].setdefault(seq,[])\n files[sub][seq].append(img)\n f.close()\n fc.close()\n return labels,files\n\n\ndef get_bbox(centers):\n \n# centre_test_world = pixel2world(centers.copy(), fx, fy, u0, v0)\n\n centerlefttop_test = centers.copy()\n centerlefttop_test[:,0] = centerlefttop_test[:,0]-xy_thres\n centerlefttop_test[:,1] = centerlefttop_test[:,1]+xy_thres\n\n centerrightbottom_test = centers.copy()\n centerrightbottom_test[:,0] = centerrightbottom_test[:,0]+xy_thres\n centerrightbottom_test[:,1] = centerrightbottom_test[:,1]-xy_thres\n\n lefttop_pixel = world2pixel(centerlefttop_test, fx, fy, u0, v0)\n rightbottom_pixel = world2pixel(centerrightbottom_test, fx, fy, u0, v0)\n\n Xmin = max(lefttop_pixel[:,0], 0)\n Ymin = max(rightbottom_pixel[:,1], 0) \n Xmax = min(rightbottom_pixel[:,0], width - 1)\n Ymax = min(lefttop_pixel[:,1], height - 1)\n\n return (int(Xmin),int(Xmax),int(Ymin),int(Ymax)) # Left Right Top Bottom\n\ndef loadDepthMap(files,base_dir,sub,seq,file,return_bbox=False):\n \"\"\"\n Read a depth-map\n :param filename: file name to load\n :return: image data of depth image\n \"\"\"\n filename = os.path.join(base_dir,'Depth',sub,seq,file)\n# print(filename)\n# img = cv2.imread(filename)\n img = np.array(Image.open(filename))\n width, height = img.shape\n left,right,top,bottom = files[sub][seq][file]['bbox']\n joints = files[sub][seq][file]['labels']\n center = files[sub][seq][file]['center']\n \n imCrop = img.copy()[int(top):int(bottom), int(left):int(right)]\n\n imgResize = cv2.resize(imCrop, (cropWidth, cropHeight), interpolation=cv2.INTER_NEAREST)\n imgResize = np.asarray(imgResize,dtype = 'float32')\n# imgResize = imgResize*-1\n imgResize[np.where(imgResize >= int(center[2]) + depth_thres)] = int(center[2])\n imgResize[np.where(imgResize <= int(center[2]) - depth_thres)] = int(center[2]) \n# \n imgResize = (imgResize - int(center[2]))\n \n joints = resizeJoints(joints,left,right,top,bottom,center)\n \n if return_bbox:\n return imgResize,joints,img,(left,right,top,bottom),center\n else:\n return imgResize, joints\n\ndef resizeJoints(joints,left,right,top,bottom,center):\n label_xy = np.ones((keypointsNumber, 3), dtype = 'float32') \n labelOutputs = np.ones((keypointsNumber, 3), dtype = 'float32') \n label_xy[:,0] = (joints[:,0].copy() - left)*cropWidth/(right - left) \n label_xy[:,1] = (joints[:,1].copy() - top)*cropHeight/(bottom - top) \n \n labelOutputs[:,1] = label_xy[:,1]\n labelOutputs[:,0] = label_xy[:,0] \n labelOutputs[:,2] = joints[:,2] - center[2]\n labelOutputs = np.asarray(labelOutputs).flatten()\n \n return labelOutputs\n\ndef returnJoints(joints,left,right,top,bottom,center):\n 
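# Inverse of resizeJoints: map keypoints from the cropped/resized patch back\n    # to pixel coordinates in the full-size depth image, and restore absolute\n    # depth by adding the crop centre's depth back onto the joint offsets.\n    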
label_xy = np.ones((keypointsNumber, 3), dtype = 'float32') \n labelOutputs = np.ones((keypointsNumber, 3), dtype = 'float32') \n label_xy[:,0] = (joints[:,0].copy()*(right - left)/cropWidth) + left \n label_xy[:,1] = (joints[:,1].copy()*(bottom - top)/cropHeight) + top \n \n labelOutputs[:,1] = label_xy[:,1]\n labelOutputs[:,0] = label_xy[:,0] \n labelOutputs[:,2] = joints[:,2] +center[2]\n labelOutputs = np.asarray(labelOutputs).flatten()\n \n return labelOutputs\n\n# Debug code\nfiles,filelists = loadAnnotations(label_file,center_file)\ntest_files,test_filelists = loadAnnotations(test_label_file,test_center_file)\n\n#for idx in tqdm(range(1,kjoints.shape[1]+1)):\n# img,label = loadDepthMap(data_dir,kjoints,bbox,centers,idx)\n\n\n\n#test_paths = test_files['200']['test_seq_1'].keys()\n#test_imgs = [loadDepthMap(test_files,val_dir,'200','test_seq_1',i) for i in list(test_paths)]\n#imgs, xlabels = zip(*test_imgs)\n#imgs = np.array(imgs)\n#xlabels = np.array(xlabels)\n#plt.imshow(imgs[10])\n#for x,y,z in np.reshape(xlabels[2],(16,3)):\n# plt.plot(x,y,color='green', marker='o')\n###-------------------------------------------------------------------------------####\n \nframes=5\n\ndef generate_data_val(files,filelist,frames,batch_size):\n \"\"\"Replaces Keras' native ImageDataGenerator.\"\"\"\n \n sub_idx =0\n seq_idx=0\n file_idx=0\n batch_size=batch_size\n # frames = 5\n while True:\n batch_frames=[]\n batch_labels=[]\n for b in range(batch_size):\n sub_list =list(filelist.keys())\n sub_name = sub_list[sub_idx]\n seq_list = list(filelist[sub_name].keys())\n seq_name =seq_list[seq_idx]\n file_list = filelist[sub_name][seq_name]\n file_list.sort()\n end_idx = len(file_list) - frames\n seq_frames=[]\n# print(len(batch_frames),sub_name,seq_name,file_idx)\n for i in range(frames):\n try:\n frame_name = file_list[file_idx+i]\n frame,label = loadDepthMap(files,val_dir,sub_name,seq_name,frame_name)\n seq_frames.append(frame)\n if i == frames-1:\n batch_labels.append(label)\n except:\n seq_frames = None\n break\n file_idx +=1\n if seq_frames:\n seq_frames= np.array(seq_frames)\n batch_frames.append(seq_frames)\n if file_idx >= end_idx+1:\n if seq_idx >= len(seq_list)-1:\n if sub_idx >= len(sub_list)-1:\n sub_idx = 0\n else:\n sub_idx +=1\n seq_idx =0\n else:\t\n seq_idx +=1\n file_idx = 0\n \n image_batch = np.array(batch_frames)\n image_label = np.array(batch_labels)\n image_batch = np.expand_dims(image_batch,4)\n \n yield image_batch,image_label\n \ndef generate_data(files,filelist,frames,batch_size):\n \"\"\"\n Custom data generator for generating sequences of N Frames for K Batches\n \n \"\"\"\n batch_size=batch_size\n # frames = 5\n while True:\n batch_frames=[]\n batch_labels=[]\n for b in range(batch_size):\n # Generate random index for sub,seq,start_frame\n sub_list =list(filelist.keys())\n sub_idx = np.random.randint(0,len(sub_list))\n sub_name = sub_list[sub_idx]\n seq_list = list(filelist[sub_name].keys())\n seq_idx = np.random.randint(0,len(seq_list))\n seq_name =seq_list[seq_idx]\n file_list =filelist[sub_name][seq_name]\n file_list.sort()\n factor = np.random.randint(1,2)\n end_idx = len(file_list) - frames*factor\n file_idx = np.random.randint(0,end_idx)\n seq_frames=[]\n# print(len(batch_frames),sub_name,seq_name,file_idx)\n for i in range(frames):\n try:\n frame_name = file_list[file_idx+i*factor]\n frame,label = loadDepthMap(files,data_dir,sub_name,seq_name,frame_name)\n seq_frames.append(frame)\n if i == frames-1:\n batch_labels.append(label)\n except:\n seq_frames = None\n 
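# a frame failed to load, so drop this whole sequence from the batch\n                    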
break\n if seq_frames:\n seq_frames= np.array(seq_frames)\n batch_frames.append(seq_frames)\n \n image_batch = np.array(batch_frames)\n image_label = np.array(batch_labels)\n image_batch = np.expand_dims(image_batch,4)\n \n yield image_batch,image_label\n#\t\t \n##\n#x=generate_data_val(test_files,test_filelists,5,5)\n#y=next(x)\n## \ndef ConvModel(): \n model = Sequential()\n model.add(Conv2D(16, (7,7), activation='relu', padding='same', input_shape=(cropHeight,cropWidth,1),kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(32, (7,7), activation='relu', padding='same',kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(64, (5,5), activation='relu', padding='same',kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(128, (5,5), activation='relu', padding='same',kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(256, (3,3), activation='relu', padding='same',kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(512, (3,3), activation='relu', padding='same',kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4)))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Flatten())\n # model.add(Dense(256,activation='relu'))\n return model\n # fix random seed for reproducibility\nseed = 29\nnp.random.seed(seed)\noptmz = optimizers.Adam(lr=0.001)\n # define the deep learning model\ndef mean_joint(y_true,y_pred):\n y_pred = K.reshape(y_pred, (160,3))\n y_true = K.reshape(y_true, (160,3))\n return K.mean(K.sum(abs(y_true-y_pred),axis=1))\n\n\ndef LSTMModel():\n model = Sequential()\n model.add(TimeDistributed(ConvModel(),input_shape=(frames,cropHeight, cropWidth,1)))\n# model.add(LSTM(2048,\n# return_sequences=True,\n# dropout=0.25,\n# recurrent_dropout=0.25))\n# model.add(LSTM(512,\n# return_sequences=True,\n# dropout=0.25,\n# recurrent_dropout=0.25))\n # model.add(LSTM(2048,\n # dropout=0.5))\n model.add(Bidirectional(LSTM(1536,dropout=0.5)))\n # model.add(Dense(256,activation='relu'))\n model.add(Dense(48,activation='relu'))\n model.compile(loss=mean_joint,\n optimizer=optmz,\n metrics=['mse','mae'])\n return model\n\nmodel = LSTMModel()\nmodel.summary()\n\ndef lrSchedule(epoch):\n lr = 1e-3\n \n if epoch > 10:\n lr *= 0.5e-3\n \n elif epoch > 8:\n lr *= 1e-3\n \n elif epoch > 6:\n lr *= 1e-2\n \n elif epoch > 4:\n lr *= 1e-1\n \n print('Learning rate: ', lr)\n \n return lr\n\nLRScheduler = LearningRateScheduler(lrSchedule)\n\nmodelname = 'icvl_clstm_basic_120_new_fix'\nfilepath = modelname + \".hdf5\"\ncheckpoint = ModelCheckpoint(filepath, \n monitor='val_loss', \n verbose=0, \n save_best_only=True, \n mode='min')\n\n # Log the epoch detail into csv\ncsv_logger = CSVLogger(modelname +'.csv')\ncallbacks_list = [checkpoint,csv_logger]\n\n\nbatch_size = 10\n\nmodel.fit_generator(\n generate_data(files,filelists,frames,batch_size),\n epochs=10,\n validation_data=generate_data_val(test_files,test_filelists,frames,batch_size),\n validation_steps=695//batch_size,\n steps_per_epoch=320000//batch_size,\n verbose=True,\n 
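# checkpoint the best weights (lowest val_loss) and log each epoch to CSV\n        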
callbacks=callbacks_list)\n\n\n#### TEST SCRIPT####\n\ndef loadModel(modelpath):\n modelGo=LSTMModel()\n modelGo.load_weights(modelpath)\n modelGo.compile(loss=mean_joint, \n optimizer=optmz, \n metrics=['mse','mae'])\n return modelGo\n\ndef testPipeline(modelGo,base_dir,files,filelist,sub,seq,file_idx):\n \"\"\"\n Read a depth-map\n :param filename: file name to load\n :return: image data of depth image\n \"\"\"\n\n sub_idx =sub\n seq_idx=seq\n file_idx=file_idx\n frames = 5\n batch_labels=[]\n sub_list =list(filelist.keys())\n sub_name = sub_list[sub_idx]\n seq_list = list(filelist[sub_name].keys())\n seq_name =seq_list[seq_idx]\n file_list = filelist[sub_name][seq_name]\n file_list.sort()\n end_idx = len(file_list) - frames\n seq_frames=[]\n# print(len(batch_frames),sub_name,seq_name,file_idx)\n for i in range(frames):\n if file_idx +i < 0:\n file_id = 0\n else:\n file_id = file_idx +i \n frame_name = file_list[file_id]\n frame,label, a ,bbox,c = loadDepthMap(files,base_dir,sub_name,seq_name,frame_name,True)\n seq_frames.append(frame)\n if i == frames-1:\n batch_labels.append(label)\n img = a\n img[np.where(img >= 1000)] =0\n predict_image = frame\n left,right,top,bottom = bbox\n center = c\n seq_frames= np.expand_dims(np.array(seq_frames),3)\n seq_frames = np.expand_dims(seq_frames,0)\n predictions = modelGo.predict(seq_frames)\n predictions = predictions.reshape((keypointsNumber,3))\n predict_joints = returnJoints(predictions,left,right,top,bottom,center)\n gtlabel = returnJoints(np.array(label).reshape((16,3)),left,right,top,bottom,center)\n return img,predict_joints,gtlabel\n#\nmodelpath = './result/icvl_clstm_basic_120_new_fix.hdf5'\nmodelGo = loadModel(modelpath)\n\ndef testImg(modelGo,sub=0,seq=0,idx=0):\n \n x,y,truey = testPipeline(modelGo,val_dir,test_files,test_filelists,sub,seq,idx)\n# img = draw_pose(x,np.reshape(y,(16,3)))\n plt.imshow(x)\n for x,y1,z in np.reshape(y,(16,3)):\n plt.plot(x,y1,color='green', marker='o')\n for x,y1,z in np.reshape(truey,(16,3)):\n plt.plot(x,y1,color='red', marker='o')\n\ntest_predict_labels = []\ngt_labels = []\nfor s in [('center_test_refined.txt','test_seq_1.txt'),('center_test2.txt','test_seq_2.txt')]:\n test_center_file = os.path.join(val_dir,s[0])\n test_label_file = os.path.join(val_dir,s[1])\n test_files,test_filelists = loadAnnotations(test_label_file,test_center_file)\n seq_len = len(test_filelists['200'][s[1].split('.')[0]])\n for idx in range(-4,seq_len-4):\n x,y,truey = testPipeline(modelGo,val_dir,test_files,test_filelists,0,0,idx)\n test_predict_labels.append(y)\n gt_labels.append(truey)\nnp.savetxt('icvl_predict.txt',np.array(test_predict_labels),fmt='%.3f')\n\nfrom multicam.util import draw_pose,draw_angles\n\nx,y,truey = testPipeline(modelGo,val_dir,test_files,test_filelists,0,0,270)\nimg = draw_pose(y.reshape((16,3)),'icvl',x)\ncv2.imwrite('icvl7.png',img)\n\ntest_predict_labels = np.array(test_predict_labels).reshape((1596,16,3))\n\ntest_angles = []\nfor i in test_predict_labels:\n temp = list(util.draw_angles(i,'icvl').values())\n test_angles.append(temp)\n\ntest_angles = np.array(test_angles)\n\ngt_labels = np.array(gt_labels).reshape((1596,16,3))\n\ngt_angles = []\nfor i in gt_labels:\n temp = list(util.draw_angles(i,'icvl').values())\n gt_angles.append(temp)\n\ngt_angles = np.array(gt_angles)\n\nnp.mean(abs(test_angles - gt_angles))\n\n#testImg(modelGo,0,0,150)\n#\n#modelGo.evaluate_generator(generate_data_val(test_files,test_filelists,frames,batch_size), steps=890//batch_size,verbose=1)\n##test_predict = 
modelGo.predict_generator(generate_data_val(test_files,test_filelists,frames,batch_size), steps=890//batch_size,verbose=1)\n","repo_name":"boonyew/Hand-Joint-Rehab","sub_path":"icvl_basic.py","file_name":"icvl_basic.py","file_ext":"py","file_size_in_byte":19239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18670386160","text":"import base64\nimport io\nfrom PIL import Image\nfrom fastapi import FastAPI\nfrom compress import compress_to_webp\nfrom pydantic import BaseModel\nfrom fastapi.middleware.cors import CORSMiddleware\n\n\n\nclass DataField(BaseModel):\n data: str\n\napp = FastAPI()\n\norigins = [\n \"*\"\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n@app.post(\"/compress/\")\nasync def compress(request: DataField):\n try:\n databytes = base64.b64decode(request.data)\n img = Image.open(io.BytesIO(databytes))\n compressed_data = compress_to_webp(img)\n img.close()\n return { 'data': compressed_data }\n except Exception as e:\n return { 'error': str(e) }","repo_name":"aidatorajiro/rakugaki","sub_path":"backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"8762147764","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n# UserInfo saves teacher status for users\nclass UserInfo(models.Model):\n user = models.OneToOneField(User)\n is_teacher = models.BooleanField(default=False)\n\n# Course saves titles for course ids\nclass Course(models.Model):\n service_id = models.IntegerField()\n title = models.TextField()\n\n# Quiz saves titles for quiz ids\nclass Quiz(models.Model):\n service_id = models.IntegerField()\n title = models.TextField()\n","repo_name":"jzuber4/Senior-Independent-Work","sub_path":"quiz_service/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72486467207","text":"# Do not modify these lines\n__winc_id__ = '71dd124b4a6e4d268f5973db521394ee'\n__human_name__ = 'strings'\n\n# Add your code after this line\n\nscorer_0 = \"Ruud Gullit\"\nscorer_1 = \"Marco van Basten\"\ngoal_0 = 32\ngoal_1 = 54\n\nscorers = scorer_0 + \" \" + str(goal_0) + \", \" + scorer_1 + \" \" + str(goal_1)\n\n\nreport = scorer_0 + \" scored in the \" + str(goal_0) + \"nd minute\" + '\\n' + scorer_1 + \" scored in the \" + str(goal_1) + \"th minute\" \n\n\n\n\n#### EDITED FIXED PART!!!! \n\nplayer = scorer_0 # or 1?\n\nspace = player.find(\" \")\nfirst_name = player[0:space]\nlast_name = player[space + 1:len(player)]\n\n\nlast_name_len = len(player[space + 1:len(player)])\n\nname_short = first_name[0] + \". \" +last_name\n\n\ncheer = ((first_name + \"! \") * len(first_name))\nchant = cheer[0:len(cheer) -1] \n\ngood_chant = ' ' not in chant[len(chant) -1] # my brain can't crack this one :( ...... 
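(chant[len(chant) - 1] is the last character, so this just checks that it is not a space) 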
it works though \n","repo_name":"HotBBQSauce/PublicWincAcademy","sub_path":"strings/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71496798408","text":"\"\"\"\nMeta information about languages.\n\"\"\"\nimport logging\nimport os\nimport re\nimport sys\n\nfrom collections import namedtuple\n\nlog = logging.getLogger(__name__)\n\n# Global variables initialized with L{set_all_languages}.\nall_languages = None\ngrflangid = None\nisocode = None\n\n\nclass PluralData:\n \"\"\"\n @ivar plural: Plural form number.\n @type plural: C{int}\n\n @ivar count: Number of plural forms.\n @type count: C{int}\n\n @ivar description: Descriptio of plural forms.\n @type description: C{list} of C{str}\n \"\"\"\n\n def __init__(self, plural, description):\n self.plural = plural\n self.description = description\n self.count = len(description)\n\n\nall_plurals = {\n None: PluralData(None, []),\n 0: PluralData(0, [\"1\", \"other\"]),\n 1: PluralData(1, [\"other\"]),\n 2: PluralData(2, [\"0..1\", \"other\"]),\n 3: PluralData(3, [\"1,21,31,...\", \"other\", \"0\"]),\n 4: PluralData(4, [\"1\", \"2\", \"3..6\", \"7..10\", \"other\"]),\n 5: PluralData(5, [\"1,21,31,...\", \"2..9,22..29,32..39,...\", \"other\"]),\n 6: PluralData(6, [\"1,21,31,...\", \"2..4,22..24,32..34,...\", \"other\"]),\n 7: PluralData(7, [\"1\", \"2..4,22..24,32..34,...\", \"other\"]),\n 8: PluralData(8, [\"1,101,201,...\", \"2,102,202,...\", \"3..4,103..104,203..204,...\", \"other\"]),\n 9: PluralData(9, [\"1,21,31,...\", \"other\"]),\n 10: PluralData(10, [\"1\", \"2..4\", \"other\"]),\n 11: PluralData(11, [\"yeong,il,sam,yuk,chil,pal\", \"i,sa,o,gu\"]),\n 12: PluralData(12, [\"1\", \"0,2..10,102..110,202..210,...\", \"11..19,111..119,211..219,...\", \"other\"]),\n 13: PluralData(13, [\"1,11\", \"2,12\", \"3..10,13..19\", \"other\"]),\n 14: PluralData(14, [\"1\", \"0,2..19,101..119,201..219,...\", \"other\"]),\n}\n\n\nclass LanguageData:\n \"\"\"\n @ivar filename: Name of language file without extension.\n @type filename: C{str}\n\n @ivar name: Name of the language in English.\n @type name: C{str}\n\n @ivar ownname: Name of the language in the language itself.\n @type ownname: C{str}\n\n @ivar isocode: Systematic name of the language.\n @type isocode: C{str}\n\n @ivar plural: Plural form number.\n @type plural: C{int}\n\n @ivar textdir: Text direction: \"ltr\" or \"rtl\"\n @type textdir: C{str}\n\n @ivar grflangid: Language id according to the NewGRF system.\n @type grflangid: C{int}\n\n @ivar gender: Genders of the language.\n @type gender: C{list} of C{str}\n\n @ivar case: Cases of the language.\n @type case: C{list} of C{str}\n\n @ivar is_stable: Whether the language is considered to be 'stable'. 
Default C{True}.\n @type is_stable: C{bool}\n \"\"\"\n\n def __init__(self, filename, found_lines):\n self.filename = filename\n self.name = found_lines[\"name\"]\n self.ownname = found_lines[\"ownname\"]\n self.isocode = found_lines[\"isocode\"]\n self.plural = found_lines[\"plural\"]\n self.textdir = found_lines[\"textdir\"]\n self.grflangid = found_lines[\"grflangid\"]\n\n gender = found_lines.get(\"gender\")\n if gender is None:\n gender = []\n self.gender = gender\n\n case = found_lines.get(\"case\")\n if case is None:\n case = []\n if \"\" not in case:\n case.append(\"\")\n self.case = case\n self.is_stable = True # By default, all languages are stable.\n\n\ndef as_str(text):\n return text.strip()\n\n\ndef as_int(text):\n if text[:2] in (\"0x\", \"0X\"):\n return int(text, base=16)\n else:\n return int(text, base=10)\n\n\ndef as_strlist(text):\n return list(set(text.split()))\n\n\n# Recognized lines in a language file.\nLanguageLine = namedtuple(\"LanguageLine\", [\"name\", \"pattern\", \"convert\", \"required\"])\nrecognized = [\n LanguageLine(\"name\", re.compile(\"##name +(.*) *$\"), as_str, True),\n LanguageLine(\"ownname\", re.compile(\"##ownname +(.*) *$\"), as_str, True),\n LanguageLine(\"isocode\", re.compile(\"##isocode +([a-z][a-z]_[A-Z][A-Z]) *$\"), as_str, True),\n LanguageLine(\"plural\", re.compile(\"##plural +((0[xX])?[0-9A-Fa-f]+) *$\"), as_int, True),\n LanguageLine(\"textdir\", re.compile(\"##textdir +(ltr|rtl) *$\"), as_str, True),\n LanguageLine(\"grflangid\", re.compile(\"##grflangid +((0[xX])?[0-9A-Fa-f]+) *$\"), as_int, True),\n LanguageLine(\"gender\", re.compile(\"##gender +(.*) *$\"), as_strlist, False),\n LanguageLine(\"case\", re.compile(\"##case +(.*) *$\"), as_strlist, False),\n]\n\n\ndef parse_file(fname):\n \"\"\"\n Parse a language file, collecting the recognized lines.\n\n @param fname: Name of the file to read.\n @type fname: C{str}\n\n @return: The found meta-information about a language.\n @rtype: C{LanguageData}\n \"\"\"\n with open(fname, \"r\", encoding=\"utf-8\") as handle:\n found_lines = {}\n for line in handle:\n if not line.startswith(\"##\"):\n continue\n\n line = line.rstrip()\n for ll in recognized:\n m = ll.pattern.match(line)\n if m:\n found_lines[ll.name] = ll.convert(m.group(1))\n break\n\n if not all(ll.name in found_lines for ll in recognized if ll.required):\n for ll in recognized:\n if ll.required and ll.name not in found_lines:\n log.error(\n 'File \"%s\" is missing required language line ##%s (or it has the wrong format)', fname, ll.name\n )\n sys.exit(1)\n\n return LanguageData(os.path.splitext(os.path.basename(fname))[0], found_lines)\n\n\ndef load_dir(directory):\n \"\"\"\n Find all text files (\".txt\" extension) in the provided directory, and load\n the meta-language information from them.\n\n @param directory: Directory path containing language meta-data text files.\n @type directory: C{str}\n\n @return: The found language information.\n @rtype: C{list} of L{LanguageData}\n \"\"\"\n result = []\n for fname in os.listdir(directory):\n if fname.lower().endswith(\".txt\"):\n result.append(parse_file(os.path.join(directory, fname)))\n\n return result\n\n\ndef set_all_languages(lang_infos):\n \"\"\"\n Set the available language information.\n\n @param lang_infos: Language meta information to use.\n @type lang_infos: C{list} of L{LanguageData}\n \"\"\"\n global all_languages, grflangid, isocode\n\n all_languages = lang_infos\n\n grflangid = dict((x.grflangid, x) for x in all_languages)\n assert len(all_languages) == 
len(grflangid)\n\n isocode = dict((x.isocode, x) for x in all_languages)\n assert len(all_languages) == len(isocode)\n","repo_name":"OpenTTD/eints","sub_path":"webtranslate/newgrf/language_info.py","file_name":"language_info.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"70103069128","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orm', '0016_clone_progress'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Distro',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('up_id', models.IntegerField(default=None, null=True)),\n ('up_date', models.DateTimeField(default=None, null=True)),\n ('name', models.CharField(max_length=255)),\n ('description', models.CharField(max_length=255)),\n ('layer_version', models.ForeignKey(to='orm.Layer_Version', on_delete=models.CASCADE)),\n ],\n ),\n ]\n\n","repo_name":"openbmc/openbmc","sub_path":"poky/bitbake/lib/toaster/orm/migrations/0017_distro_clone.py","file_name":"0017_distro_clone.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1525,"dataset":"github-code","pt":"16"} +{"seq_id":"32850909555","text":"h,m,s,t1,t2=map(int,input().split())\nc=[0]*3600*12\nh=h%12\nt1=t1%12\nt2=t2%12\nc[3600*h+60*m+s]=1\nc[720*m+12*s]=1\nc[720*s]=1\nt1,t2=min(t1,t2),max(t1,t2)\nt1*=3600\nt2*=3600\nflag=False\nfor i in range(t1+1,t2):\n if(c[i]):\n flag=True\n break\nif(not flag):\n print('YES')\nelse:\n flag=False\n for i in range(3600*12-1,t2,-1):\n if(c[i]):\n flag=True\n break\n for i in range(t1):\n if(c[i]):\n flag=True\n break\n if(not flag):\n print('YES')\n else:\n print('NO')\n\n\n \n","repo_name":"Pulkit3108/Codeforces-Problems","sub_path":"Round#438(Div. 1 + Div. 2)B.py","file_name":"Round#438(Div. 1 + Div. 
2)B.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"41280121489","text":"#!/usr/bin/env python\\n\n# -*- coding: utf-8 -*-\n\nimport sublime\nfrom .rl_issue_field_change import RedlimeIssueFieldChangeCommand\n\n\nclass RedlimeDoneRatioIssueCommand(RedlimeIssueFieldChangeCommand):\n\n def change(self):\n # https://github.com/redmine/redmine/blob/3.4-stable/app/views/issues/_attributes.html.erb#L72\n self.enums = [str(10 * x) for x in range(0, 11)]\n\n sublime.set_timeout(lambda: self.view.window().show_quick_panel(self.enums, self.on_done), 1)\n\n def on_done(self, idx):\n if idx >= 0:\n self.issue.done_ratio = int(self.enums[idx])\n self.issue.save()\n self.view.window().status_message('Done ratio changed!')\n self.refresh()\n","repo_name":"tosher/Redlime","sub_path":"base/rl_issue_doneratio_change.py","file_name":"rl_issue_doneratio_change.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"21507244123","text":"from functools import partial\nfrom IPython.display import display, HTML\nimport numpy as np\nimport operator\nimport os\nimport pandas as pd\nimport pandas_flavor as pf\nfrom pathlib import Path\nfrom sklearn.model_selection import KFold\n\n\nclass DataFrameReader:\n \"\"\"CSV reader that first tries to load local file, then falls back to S3.\n \"\"\"\n\n def __init__(self, bucket, s3_root=''):\n \"\"\"Initialize a DataFrameReader that can be used throughout a project.\n\n Parameters\n ----------\n bucket: str\n Name of s3 bucket.\n s3_root: str\n Root prefix in S3 path that is the equivalent of 'data/' locally.\n This will typically be either 'data/' or ''.\n \"\"\"\n self.bucket = bucket\n self.s3_root = s3_root\n self.s3_pre = f's3://{self.bucket}/'\n\n def read_csv(self, path, verbose=True, **kwargs):\n \"\"\"Try to read a csv locally, or from S3 if that fails.\n\n Parameters\n ----------\n path: str or path\n Local file to load.\n verbose: bool\n If True, print message after loading.\n kwargs: any\n Additional kwargs for `pd.read_csv()`.\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n try:\n df = pd.read_csv(path, **kwargs)\n if verbose: print(f'Loaded {df.shape[0]} rows from local file.')\n except FileNotFoundError:\n df = pd.read_csv(self._convert_path(path), **kwargs)\n if verbose: print(f'Loaded {df.shape[0]} rows from S3.')\n return df\n\n def _convert_path(self, path):\n \"\"\"Convert local path to S3 path.\n\n Parameters\n ----------\n path: str or Path\n Local file to load.\n\n Returns\n -------\n str: S3 path.\n \"\"\"\n path = os.path.join(*Path(path).parts[1:])\n return self.s3_pre + self.s3_root + path\n\n\n@pf.register_series_method\n@pf.register_dataframe_method\ndef ends(df, n=3):\n \"\"\"Display the first and last few rows of a dataframe.\n\n Parameters\n -----------\n n: int\n Number of rows to return in the head and tail, respectively. The total\n number of rows returned will be equal to 2*n.\n\n Returns\n --------\n pd.DataFrame\n \"\"\"\n assert n > 0, 'n must be positive.'\n\n if df.shape[0] < 2 * n:\n return df\n return pd.concat((df.head(n), df.tail(n)), axis=0)\n\n\n@pf.register_dataframe_method\ndef filter_by_count(df, col, method, value, norm=False):\n \"\"\"Filter a dataframe to return a subset of rows determined by their\n value_counts(). 
For example, we can return rows with users who appear\n at least 5 times in the dataframe, or with users who appear less than 10\n times, or who appear exactly once.\n\n Parameters\n -----------\n col: str\n Name of dataframe column to filter by.\n method: str\n Symbol specifying which operation to use for filtering.\n One of ('=', '<', '>', '<=', '>=').\n value: int, float\n Numeric value that each row in `col` will be compared against.\n norm: bool\n If True, filtering will occur on normalized values (so the value should\n be a float between 0 and 1).\n\n Returns\n --------\n pd.DataFrame\n\n Examples\n ---------\n Return rows containing users who appear at least 5 times:\n df.filter_by_count('user_id', '>=', 5)\n\n Return rows containing users who appear only once:\n df.filter_by_count('user_id', '=', 1)\n\n Return rows containing users who make up less than 20% of rows:\n df.filter_by_count('user_id', '<', .2, True)\n \"\"\"\n operation = {'=': operator.eq,\n '>': operator.gt,\n '<': operator.lt,\n '>=': operator.ge,\n '<=': operator.le\n }\n counts = df[col].value_counts(norm).loc[lambda x:\n operation[method](x, value)]\n return df[df[col].isin(counts.index)]\n\n\n@pf.register_dataframe_method\ndef grouped_mode(df, xs, y):\n \"\"\"Return the most common value in column y for each value or combination\n of values of xs. Note that this can be slow, especially when passing in\n multiple x columns.\n\n Parameters\n -----------\n xs: list[str]\n One or more column names to group by.\n y: str\n Column to calculate the modes from.\n\n Returns\n --------\n pd.Series\n \"\"\"\n return df.dropna(subset=[y])\\\n .groupby(xs)[y]\\\n .agg(lambda x: pd.Series.mode(x)[0])\n\n\n@pf.register_dataframe_method\ndef impute(df, col, fill_val=None, method='mean', inplace=False, dummy=True):\n \"\"\"Fill null values in the specified column, then optionally add an\n additional column specifying whether the first column was originally null.\n This can be useful in certain machine learning problems if the fact that a\n value is missing may indicate something about the example.\n\n For instance, we might try to predict student test scores, where one\n feature column records the survey results of asking the student's parent to\n rate their satisfaction with the teacher on a scale of 1-5. If the value is\n missing, that means the parent didn't take the survey, and therefore may\n not be very involved with the student's academics. This could be highly\n relevant information that we don't want to discard, which we would if we\n simply imputed the missing value and made no record of it.\n\n Parameters\n -----------\n col: str\n Name of df column to fill null values for.\n fill_val: str, int, float, None\n If specified, this constant value will be used to impute missing\n values. If None, the `method` argument will be used to compute a fill\n value.\n method: str\n One of ('mean', 'median', 'mode'). This will only be used when fill_val\n is None. 
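For example, method='mode' is the\n        natural choice for a categorical column, where a mean is undefined. 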
More complex methods, such as building a model to predict the\n missing values based on other features, must be done manually.\n inplace: bool\n Specify whether to perform the operation in place (default False).\n dummy: bool\n Specify whether to add a dummy column recording whether the value was\n initially null (default True).\n\n Returns\n --------\n pd.DataFrame\n \"\"\"\n if not inplace:\n df = df.copy()\n\n # If adding a dummy column, it must be created before imputing null values.\n if dummy:\n df[col + '_isnull'] = df[col].isnull() * 1\n\n # Mode returns a series, mean and median return primitives.\n if fill_val is None:\n fill_val = getattr(df[col], method)()\n if method == 'mode':\n fill_val = fill_val[0]\n df[col].fillna(fill_val, inplace=True)\n\n if not inplace:\n return df\n\n\n@pf.register_dataframe_method\ndef target_encode(df, x, y, n=5, stat='mean', shuffle=True, state=None,\n inplace=False, df_val=None):\n \"\"\"Compute target encoding based on one or more feature columns.\n\n Parameters\n -----------\n x: str, list[str]\n Name of columns to group by.\n y: str\n Name of target variable column.\n n: int\n Number of folds for regularized version. Must be >1.\n stat: str\n Specifies the type of aggregation to use on the target column.\n Typically this would be mean or occasionally median, but all the\n standard dataframe aggregation functions are available:\n ('mean', 'median', 'min', 'max', 'std', 'var', 'skew').\n shuffle: bool\n Specifies whether to shuffle the dataframe when creating folds of the\n data. This would be important, for instance, if the dataframe is\n ordered by a user_id, where each user has multiple rows. Here, a lack\n of shuffling means that all of a user's rows are likely to end up in\n the same fold. This effectively eliminates the value of creating the\n folds in the first place.\n state: None, int\n If state is an integer and shuffle is True, the folds produced by\n KFold will be repeatable. If state is None (the default) and shuffle\n is True, shuffling will be different every time.\n inplace: bool\n Specifies whether to do the operation in place. The inplace version\n does not return anything. When inplace==False, the dataframe is\n returned.\n df_val: None, pd.DataFrame\n Validation set (optional). If provided, naive (i.e. un-regularized)\n target encoding will be performed using the labels from the original\n (i.e. training) df. NOTE: Inplace must be True when passing in df_val,\n because we only return the original df.\n\n Returns\n --------\n pd.DataFrame or None\n \"\"\"\n assert df_val is None or inplace, 'To encode df_val, inplace must be True.'\n # Prevents SettingWithCopy warning, which is not actually an issue here.\n pd.options.mode.chained_assignment = None\n\n if not inplace:\n df = df.copy()\n new_col = f\"{'_'.join(x)}__{stat}_enc\"\n global_agg = getattr(df[y], stat)()\n df[new_col] = global_agg\n\n def indexer(row):\n \"\"\"Map a dataframe row to its grouped target value. When we group by\n multiple columns, our groupby object `enc` will require a tuple index.\n\n Note: When benchmarking function speed, it was slightly faster when\n leaving the if statement inside this function. 
Not sure if this is a\n coincidence but it at least seems like it's not hurting performance.\n \"\"\"\n key = row[0] if len(x) == 1 else tuple(row)\n return enc.get(key, global_agg)\n\n # Compute target encoding on n-1 folds and map back to nth fold.\n for train_idx, val_idx in KFold(n, shuffle, state).split(df):\n enc = getattr(df.iloc[train_idx, :].groupby(x)[y], stat)()\n mapped = df.loc[:, x].iloc[val_idx].apply(indexer, axis=1)\n df.loc[:, new_col].iloc[val_idx] = mapped\n df[new_col].fillna(global_agg, inplace=True)\n\n # Encode validation set in place if it is passed in. No folds are used.\n if df_val is not None:\n enc = getattr(df.groupby(x)[y], stat)()\n df_val[new_col] = df_val[x].apply(indexer, axis=1).fillna(global_agg)\n\n if not inplace:\n return df\n\n\n@pf.register_dataframe_method\ndef top_categories(df, col, n_categories=None, threshold=None):\n \"\"\"Filter a dataframe to return rows containing the most common categories.\n This can be useful when a column has many possible values, some of which\n are extremely rare, and we want to consider only the ones that occur\n relatively frequently.\n\n The user can either specify the number of categories to include or set\n a threshold for the minimum number of occurrences. One of `categories` and\n `threshold` should be None, while the other should be an integer.\n\n Parameters\n -----------\n col: str\n Name of column to filter on.\n n_categories: int, None\n Optional - # of categories to include (i.e. top 5 most common\n categories).\n threshold: int, None\n Optional - Value count threshold to include (i.e. all categories that\n occur at least 10 times).\n\n Returns\n --------\n pd.DataFrame\n \"\"\"\n assert bool(n_categories) + bool(threshold) == 1\n\n if n_categories is not None:\n top = df[col].value_counts(ascending=False).head(n_categories).index\n return df[df[col].isin(top)]\n if threshold is not None:\n return df.groupby(col).filter(lambda x: len(x) >= threshold)\n\n\n@pf.register_series_method\ndef vcounts(df_col, **kwargs):\n \"\"\"Return both the raw and normalized value_counts of a series.\n\n Parameters\n -----------\n Most parameters in value_counts() are available (i.e. `sort`, `ascending`,\n `dropna`), with the obvious exception of `normalize` since that is handled\n automatically.\n\n Returns\n --------\n pd.DataFrame\n\n Examples\n ---------\n df.colname.vcounts()\n \"\"\"\n if 'normalize' in kwargs.keys():\n del kwargs['normalize']\n\n counts = df_col.value_counts(**kwargs)\n normed_counts = df_col.value_counts(normalize=True, **kwargs)\n\n # Pandas seems to have problem merging on bool col/index. Could use\n # pd.concat but unsure if order is consistent in case of ties.\n if counts.name is None:\n counts.name = 'raw'\n normed_counts.name = 'normed'\n\n df = pd.merge(counts, normed_counts,\n how='left', left_index=True, right_index=True,\n suffixes=['_raw_count', '_normed_count'])\\\n .reset_index()\n\n col_name = '_'.join(df.columns[1].split('_')[:-2])\n return df.rename({'index': col_name}, axis=1)\n\n\n@pf.register_series_method\n@pf.register_dataframe_method\ndef pprint(df):\n \"\"\"Display a dataframe of series as a rendered HTML table in\n Jupyter notebooks. 
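This is a thin wrapper\n    around IPython's display(HTML(df.to_html())). 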
Useful when printing multiple outputs in a cell.\n \"\"\"\n if isinstance(df, pd.core.series.Series):\n df = pd.DataFrame(df)\n display(HTML(df.to_html()))\n\n\n@pf.register_series_method\n@pf.register_dataframe_method\ndef lambda_sort(df, func, **kwargs):\n \"\"\"Sort a DataFrame or Series by a function that takes itself as input.\n For example, we can sort by the absolute value of a column or the sum of\n 2 different columns.\n Parameters\n -----------\n func: function\n Callable function or lambda expression to sort by.\n (eg: lambda x: abs(x))\n **kwargs: additional keyword args will be passed to the sort_values()\n method.\n\n Returns\n --------\n pd.DataFrame\n\n Examples\n ---------\n >>> df = pd.DataFrame(np.arange(8).reshape((4, 2)), columns=['a', 'b'])\n >>> df.loc[3, 'a'] *= -1\n >>> df\n\n a b\n 0 0 1\n 1 2 3\n 2 4 6\n 3 -6 7\n\n >>> df.lambda_sort(lambda x: x.a * x.b)\n\n a b\n 3 -6 7\n 2 0 1\n 1 2 3\n 0 4 5\n \"\"\"\n col = 'lambda_col'\n df = df.copy()\n if isinstance(df, pd.core.series.Series):\n df = pd.DataFrame(df)\n df[col] = func(df)\n return df.sort_values(col, **kwargs).drop(col, axis=1)\n\n\n@pf.register_dataframe_method\ndef coalesce(df, cols):\n \"\"\"Create a column where each row contains the first non-null value for\n that row from a list of columns.\n\n Parameters\n ----------\n cols: list[str]\n Names of columns to coalesce over.\n\n Returns\n -------\n pd.Series\n \"\"\"\n new_col = df[cols[0]].copy()\n i = 1\n while new_col.isnull().sum() > 0 and i < len(cols):\n next_col = cols[i]\n new_col.fillna(df[next_col], inplace=True)\n i += 1\n return new_col\n\n\n@pf.register_series_method\ndef stringify(list_col, join=True, ignore_terms=None, greedy_ignore=False,\n null=''):\n \"\"\"Converts a df column of lists, possibly containing np.nan's, to strings.\n\n Parameters\n -----------\n join: bool\n If True, create the string by joining all items in each list/row. If\n False, simply return the first item in each list. Default True.\n ignore_terms: list, set\n Terms to drop from the column. If None, all terms will be retained.\n Ex: {'width=device-width'}\n greedy_ignore: bool\n If True, ignore_terms can be a list of prefixes. In other words,\n we will remove any strings in the list column that start with one of\n the ignore_terms even (as opposed to requiring an exact match.)\n null: str\n The value to replace null values with. For many pandas string methods,\n it is beneficial for this to be a string rather than np.nan.\n\n Returns\n -------\n pd.Series\n \"\"\"\n ignore_terms = dict.fromkeys(ignore_terms or [])\n\n def process(x, join, ignore_terms, greedy_ignore, null):\n # Handles both np.nan and empty lists.\n if not isinstance(x, list) or not x:\n return null\n\n # Dict instead of set to maintain order\n # (dict-key operations would still change order).\n x = dict.fromkeys(map(str, x))\n if greedy_ignore:\n x = (term for term in x if not term.startswith(tuple(ignore_terms)))\n else:\n x = (term for term in x if term not in ignore_terms)\n\n # Return a string.\n if join:\n return ' '.join(x)\n else:\n return next(x)\n\n return list_col.map(partial(process, join=join, ignore_terms=ignore_terms,\n greedy_ignore=greedy_ignore, null=null))\n\n\n@pf.register_series_method\ndef is_list_col(col):\n \"\"\"Determine whether a column is a list column. 
These are columns\n    resulting from the protobuf format, where we end up with a situation\n    where rows either contain lists or np.nan.\n\n    Parameters\n    -----------\n    col: pd.Series\n        The column to evaluate.\n\n    Returns\n    --------\n    bool\n    \"\"\"\n    # Filter out nulls first otherwise type could be np.nan instead of list.\n    no_nulls = col.dropna()\n    return not no_nulls.empty and isinstance(no_nulls.iloc[0], list)\n\n","repo_name":"hdmamin/pandas_htools","sub_path":"pandas_htools/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"22151394118","text":"import pandas as pd \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\n\ndf = pd.read_csv(\"/home/Robbie/Desktop/Drug_Design_Project/Data_Files/PubChem_with_toxicity.csv\")\n# print(df)\n# print(df)\ndf = df.dropna()\nnumeric = df.select_dtypes(include='number').columns\ndf = df[numeric]\n# print(df)\n\ny = df[\"mouse_intraperitoneal_LD50\"]\nX = df.drop(\n    [\"mouse_intraperitoneal_LD50\",\n    \"Unnamed: 0\"], axis=1)\n\nfile=open('AI_model/' +\\\n    \"AI_output.csv\", 'a')\n\n\ndfs = []\nfor i in X.columns:\n    \n    X_train, X_test, y_train, y_test = train_test_split(X[i], y, test_size=0.2, random_state=100)\n\n    # print(\"X_train: \", X_train)\n    # print(\"y_train: \", y_train)\n\n    X_train = pd.DataFrame(X_train)\n    # print(type(X_train))\n\n    X_test = pd.DataFrame(X_test)\n\n    lr = LinearRegression()\n    lr.fit(X=X_train, y=y_train)\n\n    y_lr_train_pred = lr.predict(X_train)\n    y_lr_test_pred = lr.predict(X_test)\n\n    # print(y_lr_train_pred, y_lr_test_pred)\n\n    from sklearn.metrics import mean_squared_error, r2_score\n\n    lr_train_mse = mean_squared_error(y_train, y_lr_train_pred)\n    lr_train_r2 = r2_score(y_train, y_lr_train_pred)\n\n    lr_test_mse = mean_squared_error(y_test, y_lr_test_pred)\n    lr_test_r2 = r2_score(y_test, y_lr_test_pred)\n\n    # print(lr_train_mse, lr_train_r2, lr_test_mse, lr_test_r2)\n    data = {\n        'Model': ['Linear Regression'],\n        'Data': [i],\n        'Train MSE': [lr_train_mse],\n        'Train R2': [lr_train_r2],\n        'Test MSE': [lr_test_mse],\n        'Test R2': [lr_test_r2]\n    }\n\n    lr_results = pd.DataFrame(data)\n    dfs.append(lr_results)\n\nfirst_df = pd.DataFrame()\nfor i in dfs:\n    first_df = pd.concat([first_df, i], ignore_index=True)\nfirst_df.to_csv(file,index=False)\n \n    ","repo_name":"RSimmondsIV/Drug_Design_Project","sub_path":"AI_model/ai_model.py","file_name":"ai_model.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"19806714885","text":"from __future__ import absolute_import\n\nimport typing\nimport inspect\nimport oneflow.python.framework.remote_blob as remote_blob_util\nimport oneflow.python.framework.typing as oft\nimport oneflow.python.experimental.enable_typing_check as enable_typing_check\n\n\ndef CheckGlobalFunctionAnnotation(signature):\n    parameters = signature.parameters\n    if all(p.annotation is not inspect._empty for _, p in parameters.items()):\n        for _, p in parameters.items():\n            assert (\n                p.kind == inspect._ParameterKind.POSITIONAL_OR_KEYWORD\n            ), \"no parameters like *args or **kwargs supported\"\n            CheckGlobalFunctionParamAnnotation(p.annotation)\n    elif enable_typing_check.typing_check_enabled:\n        for param_name, p in parameters.items():\n            if p.annotation is inspect._empty:\n                raise NotImplementedError(\"parameter %s is 
not annotated\" % param_name)\n else:\n # do nothing\n pass\n return_annotation = signature.return_annotation\n if return_annotation is not inspect._empty:\n CheckGlobalFunctionReturnAnnotation(return_annotation)\n elif enable_typing_check.typing_check_enabled:\n raise NotImplementedError(\"no return annotation found.\")\n else:\n # do nothing\n pass\n\n\ndef CheckGlobalFunctionParamAnnotation(cls):\n if oft.OriginFrom(cls, typing.Tuple):\n assert cls.__args__ is not None, \"T in typing.Tuple[T, ...] cannot be omitted\"\n assert len(cls.__args__) > 0\n for cls_arg in cls.__args__:\n CheckGlobalFunctionParamAnnotation(cls_arg)\n elif oft.OriginFrom(cls, oft.OneflowNumpyDef):\n pass\n else:\n raise NotImplementedError(\"invalid parameter annotation %s found\" % cls)\n\n\ndef CheckGlobalFunctionReturnAnnotation(cls):\n if cls is None:\n pass\n elif oft.OriginFrom(cls, oft.Callback):\n assert (\n cls.__args__ is not None\n ), \"T in oneflow.typing.Callback[T] cannot be omitted\"\n assert len(cls.__args__) == 1\n _CheckGlobalFunctionReturnAnnotation(cls.__args__[0])\n else:\n _CheckGlobalFunctionReturnAnnotation(cls)\n\n\ndef _CheckGlobalFunctionReturnAnnotation(cls):\n if oft.OriginFrom(cls, typing.Tuple):\n assert cls.__args__ is not None, \"T in typing.Tuple[T, ...] cannot be omitted\"\n assert len(cls.__args__) > 0\n for cls_arg in cls.__args__:\n _CheckGlobalFunctionReturnAnnotation(cls_arg)\n elif oft.OriginFrom(cls, typing.Dict):\n assert cls.__args__ is not None, \"(K, V) in typing.Dict[K,V] cannot be omitted\"\n assert len(cls.__args__) == 2\n _CheckGlobalFunctionReturnAnnotation(cls.__args__[1])\n elif oft.OriginFrom(cls, oft.PyStructCompatibleToBlob):\n pass\n else:\n raise NotImplementedError(\"invalid return annotation %s found\" % cls)\n\n\ndef CheckReturnByAnnotation(function_name, ret, annotation):\n if annotation is inspect._empty:\n return\n if annotation is None:\n error_str = (\n \"%s does not matched return annotation %s of global_function %s.\"\n % (ret, annotation, function_name)\n )\n assert ret is None, error_str\n elif oft.OriginFrom(annotation, oft.Callback):\n _CheckReturnByAnnotation(function_name, ret, annotation.__args__[0])\n else:\n _CheckReturnByAnnotation(function_name, ret, annotation)\n\n\ndef _CheckReturnByAnnotation(function_name, ret, annotation):\n error_str = \"%s does not matched return annotation %s of global_function %s.\" % (\n ret,\n annotation,\n function_name,\n )\n if oft.OriginFrom(annotation, typing.Tuple):\n assert type(ret) is tuple, error_str\n assert len(ret) == len(annotation.__args__), \"%s length compare: %s v.s. %s\" % (\n error_str,\n len(ret),\n len(annotation.__args__),\n )\n for ret_i, annotation_i in zip(ret, annotation.__args__):\n _CheckReturnByAnnotation(function_name, ret_i, annotation_i)\n elif oft.OriginFrom(annotation, typing.Dict):\n assert len(annotation.__args__) == 2\n assert type(ret) is dict, error_str\n for key, val in ret.items():\n assert type(key) is annotation.__args__[0], (\n \"type of %s:%s and %s:%s do not matched return annotation (%s, %s) of global_function %s.\"\n % (\n key,\n type(key),\n val,\n type(val),\n annotation.__args__[0],\n annotation.__args__[1],\n function_name,\n )\n )\n _CheckReturnByAnnotation(function_name, val, annotation.__args__[1])\n elif oft.OriginFrom(annotation, oft.Numpy):\n assert isinstance(ret, remote_blob_util.BlobDef), \"type(ret): %s\" % type(ret)\n assert not ret.is_dynamic, (\n \"only fixed shaped blob compatible to oneflow.typing.Numpy. 
\"\n \"you can change annotation to oneflow.typing.ListNumpy \"\n \"or oneflow.typing.ListListNumpy\"\n )\n elif oft.OriginFrom(annotation, oft.ListNumpy):\n assert isinstance(ret, remote_blob_util.BlobDef), \"type(ret): %s\" % type(ret)\n elif oft.OriginFrom(annotation, oft.ListListNumpy):\n assert isinstance(ret, remote_blob_util.BlobDef), \"type(ret): %s\" % type(ret)\n else:\n raise NotImplementedError(\"invalid return annotation %s found\" % annotation)\n\n\ndef TransformGlobalFunctionResult(future_blob, annotation):\n if annotation is inspect._empty:\n return future_blob\n elif annotation is None:\n assert future_blob is None\n return None\n elif oft.OriginFrom(annotation, oft.Callback):\n annotation = annotation.__args__[0]\n\n def Transform(f):\n return lambda x: f(TransformReturnedLocalBlob(x, annotation))\n\n return lambda f: future_blob.async_get(Transform(f))\n else:\n return TransformReturnedLocalBlob(future_blob.get(), annotation)\n\n\ndef TransformReturnedLocalBlob(local_blob, annotation):\n if oft.OriginFrom(annotation, typing.Tuple):\n assert type(local_blob) is tuple\n assert len(local_blob) == len(annotation.__args__)\n pairs = zip(local_blob, annotation.__args__)\n return tuple(TransformReturnedLocalBlob(*pair) for pair in pairs)\n elif oft.OriginFrom(annotation, typing.Dict):\n assert type(local_blob) is dict\n assert len(annotation.__args__) == 2\n vals = [\n TransformReturnedLocalBlob(val, annotation.__args__[1])\n for val in local_blob.values()\n ]\n return dict(zip(local_blob.keys(), vals))\n elif oft.OriginFrom(annotation, oft.PyStructCompatibleToBlob):\n return TransformLocalBlob(local_blob, annotation)\n else:\n raise NotImplementedError(\n \"invalid watch callback parameter annotation %s found\" % annotation\n )\n\n\ndef CheckWatchCallbackParameterAnnotation(parameters):\n assert len(parameters) == 1, \"watch callback should accept only one parameter\"\n annotation = parameters[list(parameters.keys())[0]].annotation\n if annotation is inspect._empty:\n if enable_typing_check.typing_check_enabled:\n raise NotImplementedError(\"the watch callback's parameter is not annotated\")\n return\n if not oft.OriginFrom(annotation, oft.PyStructCompatibleToBlob):\n raise NotImplementedError(\n (\"invalid watch callback paremeter annotation %s found. \" % annotation)\n + \"candidate annotations: oneflow.typing.Numpy, oneflow.typing.ListNumpy, \"\n \"oneflow.typing.ListListNumpy\"\n )\n\n\ndef CheckWatchedBlobByAnnotation(blob, annotation):\n if annotation is inspect._empty:\n return\n if oft.OriginFrom(annotation, oft.Numpy):\n assert not blob.is_dynamic, (\n \"only fixed shaped blob compatible to oneflow.typing.Numpy. 
\"\n \"you can change annotation to oneflow.typing.ListNumpy \"\n \"or oneflow.typing.ListListNumpy\"\n )\n elif oft.OriginFrom(annotation, oft.ListNumpy):\n pass\n elif oft.OriginFrom(annotation, oft.ListListNumpy):\n pass\n else:\n raise NotImplementedError(\n \"invalid watch callback parameter annotation %s found\" % annotation\n )\n\n\ndef TransformWatchedBlob(future_blob, handler):\n parameters = inspect.signature(handler).parameters\n annotation = parameters[list(parameters.keys())[0]].annotation\n if annotation is inspect._empty:\n return future_blob\n return TransformLocalBlob(future_blob, annotation)\n\n\ndef TransformLocalBlob(future_blob, annotation):\n if oft.OriginFrom(annotation, oft.Numpy):\n return future_blob.numpy()\n elif oft.OriginFrom(annotation, oft.ListNumpy):\n return future_blob.numpy_list()\n elif oft.OriginFrom(annotation, oft.ListListNumpy):\n return future_blob.numpy_lists()\n else:\n raise NotImplementedError(\n \"invalid watch callback parameter annotation %s found\" % annotation\n )\n","repo_name":"Sodu-Qinming/Oneflow","sub_path":"oneflow/python/framework/typing_util.py","file_name":"typing_util.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71832295047","text":"import asyncio\n\n\nasync def waiter(event):\n print(\"Waiting for it ...\")\n # wait -> event 가 set 될때까지 Block\n await event.wait()\n print(\"... got it\")\n\n\nasync def main():\n # Create Evnet Object\n event = asyncio.Event()\n\n # Event set 까지 기다림\n waiter_task = asyncio.create_task(waiter(event))\n\n # Sleep for 1 sec and set the event\n await asyncio.sleep(1)\n # Evnet Flag를 참으로.\n event.set()\n # Set 된 Event 초기화\n event.clear()\n\n waiter_task2 = asyncio.create_task(waiter(event))\n # event.set()\n # Wait Until the waiter task is finished\n await asyncio.gather(waiter_task, waiter_task2)\n\n\nasyncio.run(main())\n","repo_name":"yeoV/concurrency-python","sub_path":"src/Python_Docs/Synchronization_Primitives/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43585316994","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport xml.dom.minidom\nsys.path.append('/home/nhutsam/')\n# %matplotlib inline\nfrom nms import nms\nplt.rcParams['figure.figsize'] = (10, 10)\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# Make sure that caffe is on the python path:\ncaffe_root = '/home/nhutsam/receiptreaderdev/TextBoxes/' # this file is expected to be in {caffe_root}/examples\nos.chdir(caffe_root)\nimport sys\nsys.path.insert(0, 'python')\n\nimport caffe\n#caffe.set_device(0)\n#caffe.set_mode_gpu()\ncaffe.set_mode_cpu()\nmodel_def = '/home/nhutsam/receiptreaderdev/TextBoxes/examples/TextBoxes/deploy.prototxt'\nmodel_weights = '/home/nhutsam/receiptreaderdev/TextBoxes/examples/TextBoxes/TextBoxes_icdar13.caffemodel'\n\nuse_multi_scale = False\n\nif not use_multi_scale:\n scales=((700,700),)\nelse:\n\tscales=((300,300),(700,700),(700,500),(700,300),(1600,1600))\n\n# In[1]:\nnet = caffe.Net(model_def, # defines the structure of the model\n model_weights, # contains the trained weights\n caffe.TEST) # use test mode (e.g., don't perform dropout)\n\n# 
In[2]:7\ndt_results=[]\nimage_path='/home/nhutsam/receiptreaderdev/TextBoxes/examples/img/demo18.jpg'\nimage_path\nimage=caffe.io.load_image(image_path)\nimage_height,image_width,channels=image.shape\nplt.clf()\nplt.imshow(image)\ncurrentAxis = plt.gca()\nfor scale in scales:\n\tprint(scale)\n\timage_resize_height = scale[0]\n\timage_resize_width = scale[1]\n\ttransformer = caffe.io.Transformer({'data': (1,3,image_resize_height,image_resize_width)})\n\ttransformer.set_transpose('data', (2, 0, 1))\n\ttransformer.set_mean('data', np.array([104,117,123])) # mean pixel\n\ttransformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]\n\ttransformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB\n\t\n\tnet.blobs['data'].reshape(1,3,image_resize_height,image_resize_width)\t\t\n\ttransformed_image = transformer.preprocess('data', image)\n\tnet.blobs['data'].data[...] = transformed_image\n\t# Forward pass.\n\tdetections = net.forward()['detection_out']\n\t# Parse the outputs.\n\tdet_label = detections[0,0,:,1]\n\tdet_conf = detections[0,0,:,2]\n\tdet_xmin = detections[0,0,:,3]\n\tdet_ymin = detections[0,0,:,4]\n\tdet_xmax = detections[0,0,:,5]\n\tdet_ymax = detections[0,0,:,6]\n\ttop_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6]\n\ttop_conf = det_conf[top_indices]\n\ttop_xmin = det_xmin[top_indices]\n\ttop_ymin = det_ymin[top_indices]\n\ttop_xmax = det_xmax[top_indices]\n\ttop_ymax = det_ymax[top_indices]\n\n\tfor i in xrange(top_conf.shape[0]):\n\t\txmin = int(round(top_xmin[i] * image.shape[1]))\n\t\tymin = int(round(top_ymin[i] * image.shape[0]))\n\t\txmax = int(round(top_xmax[i] * image.shape[1]))\n\t\tymax = int(round(top_ymax[i] * image.shape[0]))\n\t\txmin = max(1,xmin)\n\t\tymin = max(1,ymin)\n\t\txmax = min(image.shape[1]-1, xmax)\n\t\tymax = min(image.shape[0]-1, ymax)\n\t\tscore = top_conf[i]\n\t\tdt_result=[xmin,ymin,xmax,ymin,xmax,ymax,xmin,ymax,score]\n\t\tdt_results.append(dt_result)\n\t\t\n\t\t\n# In[]:\nfrom math import sqrt\nnms_flag = nms(dt_results,0.4)\ndef sortBycoordinate(dt_results):\n coordinate_result = [dt_results[k] for k,dt in enumerate(dt_results) if nms_flag[k]==True]\n coordinate_result = sorted(coordinate_result, key=lambda x: x[1])\n n = len(coordinate_result)\n flag_res=[False]*len(coordinate_result) \n j=0\n final_res=[]\n while j!=-1:\n res=[]\n temp = (coordinate_result[j][5]+coordinate_result[j][1])/2\n for i in range(j,n):\n if coordinate_result[i][5]>temp and coordinate_result[i][1] %-20s' % (raw_pred, sim_pred))\n results+=sim_pred+' '\n\n\n\n\n\n\n\n\n\n\n","repo_name":"phucvu460/receiptreaderdev","sub_path":"receiptreaderdev/TextBoxes/examples/TextBoxes/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17917280412","text":"import os\nimport sys\nfrom decimal import Decimal\n\ncur_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(cur_path + '/../libs')\nsys.path.append(cur_path + '/../repositories')\nfrom bot_father import BotFather\nfrom bot_constant import *\nimport traceback\n\n\nclass SupportBot(BotFather):\n\n def __init__(self):\n super().__init__('EthBtcSupportBot')\n self.best_price_only = False\n self._trade_count = 0\n\n def bot_entry(self, web_input):\n super().bot_entry(web_input)\n # get params\n\n def begin_trade(self, web_input):\n trade_id = self._trade_count\n # buy or 
sell\n side = web_input['side']\n # profit = web_input['profit']\n amount = web_input['amount']\n type = web_input['type_order']\n # boolean place order profit, stop loss\n # is_take_stop_loss = web_input['take_stop_loss']\n is_take_profit = web_input['take_profit']\n is_take_price_manual = web_input['take_price_manual']\n self._log('support_botmom---35begin trade .. side {}'.format(side))\n # market order and callback place profit order\n meta_data = {\n 'trade_id': trade_id,\n # 'side': side,\n # 'amount': amount,\n # 'profit': profit,\n # 'is_take_profit': is_take_profit,\n # 'is_take_stop_loss': is_take_stop_loss,\n 'web_input': web_input\n }\n # get price\n if type == MARKET:\n self._process_place_order(is_take_profit, None, amount, side, type, meta_data)\n return True\n if is_take_price_manual:\n price = web_input['price']\n else:\n # fetch order book\n rank = int(web_input['rank'])\n type_price_order_book = web_input['type_price_order_book']\n if LIMIT == type_price_order_book:\n side_fetch_price_side = side\n else:\n # market\n side_fetch_price_side = BUY if side == SELL else SELL\n price = self._fetch_best_price(self._pair, side_fetch_price_side, index=rank)\n self._process_place_order(is_take_profit, price, amount, side, type, meta_data)\n return True\n\n def _process_place_order(self, is_take_profit, price, amount, side, type, meta_data):\n if is_take_profit:\n # type, side\n order_info = self.place_order(price, amount, side, self._pair, type=type,\n meta_data=meta_data, callback=self._callback_order_status)\n else:\n order_info = self.place_order(price, amount, side, self._pair, type=type,\n meta_data=meta_data, callback=None)\n\n def _callback_order_status(self, data):\n try:\n if not data:\n self._log('support_botmom---77Data is None, return False')\n return False\n # order close\n if ORDER_CLOSED == data[KEY_GET_ORDER_STATUS]:\n # place profit order, return True\n self._log('support_botmom---82_callback_order_status with previous data {}'.format(data))\n meta_data = data[KEY_GET_ORDER_META_DATA]\n web_input = meta_data['web_input']\n profit = web_input['profit']\n side = BUY if data[KEY_GET_ORDER_SIDE] == SELL else SELL\n price_profit = float(Decimal(data[KEY_GET_ORDER_AVERAGE_PRICE]) \\\n - Decimal(str(STEP_WISE[side])) * Decimal(profit))\n order_info = self.place_order(price_profit, data[KEY_GET_ORDER_AMOUNT], side, self._pair, type=LIMIT,\n callback=self.__callback_order_profit)\n\n is_take_stop_loss = web_input.get('take_stop_loss', False)\n if is_take_stop_loss and order_info:\n s_stop_loss_value = web_input['stop_loss']\n type_stop_loss = web_input['type_stop_loss']\n price_stop_loss = float(Decimal(data[KEY_GET_ORDER_AVERAGE_PRICE]) \\\n + Decimal(str(STEP_WISE[side])) * Decimal(s_stop_loss_value))\n self.place_stop_loss_order_helper(price_stop_loss, data[KEY_GET_ORDER_FILLED], side, self._pair,\n type_stop_loss, cb_when_order_open=self._callback_when_stop_loss_open,\n order_profit_id=order_info[KEY_GET_ORDER_ID])\n elif ORDER_CANCELED == data[KEY_GET_ORDER_STATUS]:\n self._log('Something wired, Order canceled')\n except Exception as e:\n tb = traceback.format_exc()\n print('ERROR {}'.format(tb))\n self._log('support_botmom---106ERROR {}'.format(tb))\n\n def _callback_when_stop_loss_open(self, data):\n self._log('Stop loss order open {}'.format(data))\n\n def __callback_order_profit(self, data):\n if ORDER_CLOSED == data[KEY_GET_ORDER_STATUS]:\n self._log(\"support_botmom---113 Order profit callback 
{}\".format(data))\n","repo_name":"codepritesh/saastoolfeb","sub_path":"bots/support_bot.py","file_name":"support_bot.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34433173035","text":"'''\nDate: 2021/12/24\nAuthor: David Wu\nLink: https://www.algoexpert.io/questions/Product%20Sum\n\nQuestion Summary:\nThe product sum of [x, [y, [z]]] is x + 2 * (y + 3z)\n\nExample:\n[1, [2, [3]]] = 1 + 2 * (2 + 3 * 3) = 23\n\nOptimal Complexity: O(n) || O(c)\n'''\n\ndef productSum(array, currentLevel=1):\n\tcurrentSum = 0\n\tfor x in array:\n\t\tif type(x) == list:\n\t\t\tcurrentSum += productSum(x, currentLevel+1)\n\t\telse:\n\t\t\tcurrentSum += x\n\t\t\t\n\treturn currentSum * currentLevel\n\n \nif __name__ == '__main__': \n\n\tprint(productSum([1, [2, [3]]]) == 23)\n\n\n \n","repo_name":"UWDavidWu/AlgoExpert_Daily_Question","sub_path":"Easy/11. Product_Sum.py","file_name":"11. Product_Sum.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21091388855","text":"\"\"\"\nEntry point of flask application.\n\"\"\"\nimport os\nfrom sys import exit\nfrom flask_migrate import Migrate\nfrom flask_minify import Minify\n\nfrom src.config import config_dict\nfrom src import create_app, db\n\nDEBUG = True\n\ntry:\n app_config = config_dict['dev']\nexcept KeyError:\n exit('Error: Invalid config.')\n\napp = create_app(app_config)\nMigrate(app, db)\n\nif not DEBUG:\n Minify(app=app, html=True, js=False, cssless=False)\n\nif DEBUG:\n app.logger.info('DEBUG = ' + str(DEBUG))\n app.logger.info('Page Compression = ' + 'FALSE' if DEBUG else 'TRUE')\n app.logger.info('DBMS = ' + app_config.SQLALCHEMY_DATABASE_URI)\n app.logger.info('ASSETS_ROOT = ' + app_config.ASSETS_ROOT)\n\nif __name__ == \"__main__\":\n # app = create_app()\n app.run(debug=True, host=\"0.0.0.0\")\n","repo_name":"dheeruymv/food_4_all","sub_path":"backend/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71100738247","text":"\n\"\"\"\n• 本质上是3字节转4字节\n• 3*8=4*6\n• 将3字节中数据每次取6 位\n• 用6位得到的数值 (0-63)作为索引去查编码表\n• 得到编码字符\n\"\"\"\nimport string \nimport base64\n\ndef base64Encode(en_string):\n oldBin = ''\n tempStr = []\n result = ''\n base64_list = string.ascii_uppercase +string.ascii_lowercase \\\n + string.digits + '+/'\n \n # 得到2进制字符串流 ord:返回字符的unicode编码(为了能编码汉字)\n for ch in en_string:\n temp = int(bin(ord(ch)).replace('0b', ''))\n oldBin += '{:08}'.format(temp) # 把数字补到8位\n print(oldBin)\n\n # 切片 使得每6位合并得到字符串\n for i in range(0, len(oldBin), 6):\n tempStr.append('{:<06}'.format(oldBin[i:i+6])) # 左对齐长6 (补0)\n print(tempStr)\n\n # 字符串装数字后查表,得到编码结果字符串\n for item in tempStr:\n result += base64_list[int(item, 2)]\n print(result)\n\n\n # 补等号\n if len(result) % 4 == 2:\n result += '=='\n elif len(result) % 4 ==3:\n result += '='\n\n return result\n\ndef testb64module():\n res = base64.b64encode(\"苏州大学\".encode('utf-8'))\n print(str(res, 'utf-8')) # res是bytes结构\n orgin = base64.b64decode(res)\n print(str(orgin, 'utf-8'))\n\n res = base64.b64encode(\"苏州大学\".encode('gbk'))\n print(str(res, 'utf-8'))\n orgin = base64.b64decode(res)\n print(str(orgin, 'gbk'))\n\nif __name__ == \"__main__\":\n print(base64Encode(input(\"请输入要编码的字符串:\")))\n 
testb64module()\n","repo_name":"douzujun/Python-Foundation-Suda","sub_path":"苏大上机代码/python_project/02_MOOC习题/05_4Base64.py","file_name":"05_4Base64.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"16"}
+{"seq_id":"5662358732","text":"# -*- coding:utf-8 -*-\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\nclass Solution:\n    # Returns the ListNode\n    def ReverseList(self, pHead):\n        pre = None\n        while pHead is not None:\n            next = pHead.next\n            pHead.next = pre\n            pre = pHead\n            pHead = next\n        return pre","repo_name":"w-qilong/Data_Structure","sub_path":"leecode/链表逆序.py","file_name":"链表逆序.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70103080328","text":"from django.test import TestCase\nfrom django.core import management\n\nfrom orm.models import Layer_Version, Machine, Recipe\n\n\nclass TestLayerIndexUpdater(TestCase):\n    def test_run_lsupdates_command(self):\n        # Load some release information for us to fetch from the layer index\n        management.call_command('loaddata', 'poky')\n\n        old_layers_count = Layer_Version.objects.count()\n        old_recipes_count = Recipe.objects.count()\n        old_machines_count = Machine.objects.count()\n\n        # Now fetch the metadata from the layer index\n        management.call_command('lsupdates')\n\n        self.assertTrue(Layer_Version.objects.count() > old_layers_count,\n                        \"lsupdates ran but we still have no more layers!\")\n        self.assertTrue(Recipe.objects.count() > old_recipes_count,\n                        \"lsupdates ran but we still have no more Recipes!\")\n        self.assertTrue(Machine.objects.count() > old_machines_count,\n                        \"lsupdates ran but we still have no more Machines!\")\n","repo_name":"openbmc/openbmc","sub_path":"poky/bitbake/lib/toaster/tests/commands/test_lsupdates.py","file_name":"test_lsupdates.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":1525,"dataset":"github-code","pt":"16"}
+{"seq_id":"44975444335","text":"import numpy as np\nfrom HypothesisFunc.function import hyp\n\n\ndef error(X, Y, theta):\n    \"\"\"\n    error function is defined as least squares\n    :param X: feature matrix; rows are the n samples and columns are the features\n    :param Y: the expected target values (set of values)\n    :param theta: the parameter set used to evaluate the prediction\n    :return: the error between prediction and target\n    \"\"\"\n    h = hyp(X, theta)\n    loss = np.sum((h - Y) ** 2) / 2\n    return loss\n\ndef error_V2(X, Y, theta, lamb=1):\n    \"\"\"\n    error function is defined as least squares with Lasso (L1) regularization\n    :param X: feature matrix; rows are the n samples 
and columns are the features\n    :param Y: the expected target values (set of values)\n    :param theta: the parameter set used to evaluate the prediction\n    :return: the error between prediction and target\n    \"\"\"\n    # h = hyp(X, theta)\n    first_sum = 0\n    for index, el in enumerate(Y):\n        # prediction for sample `index` is the dot product of its features with theta\n        inner_first_sum = np.sum(X[index] * theta)\n        first_sum += (Y[index] - inner_first_sum) ** 2\n\n    first_sum *= 0.5\n\n    second_sum = 0.5 * lamb * np.sum(abs(theta))\n    loss = first_sum + second_sum\n    return loss","repo_name":"allePansera/LinearRegression","sub_path":"ErrorFunc/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"4141725756","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : counter.py\n# Author : Jiayuan Mao\n# Email  : maojiayuan@gmail.com\n# Date   : 02/18/2018\n#\n# This file is part of Jacinle.\n# Distributed under terms of the MIT license.\n\nimport itertools\nimport threading\nimport queue\nimport collections\nimport time\n\n__all__ = ['Counter', 'CounterBasedEvent', 'CounterBasedMonitor']\n\n\nclass Counter(object):\n    def __init__(self):\n        self._cnt = itertools.count()\n        self._ref = itertools.count()\n        self._iter_cnt = iter(self._cnt)\n        self._iter_ref = iter(self._ref)\n\n    def tick(self):\n        next(self._iter_cnt)\n\n    def get(self):\n        ref = next(self._iter_ref)\n        cnt = next(self._iter_cnt)\n        return cnt - ref\n\n\nclass CounterBasedEvent(object):\n    \"\"\"Thread-safe counter-based callback invoker. When the counter is incremented, the system will check whether\n    the counter has reached a target value. If so, the event will be set.\"\"\"\n    def __init__(self, target, tqdm=None):\n        self._cnt = itertools.count()\n        self._iter_cnt = iter(self._cnt)\n\n        self._target = target\n        self._event = threading.Event()\n\n        self._tick_mutex = threading.Lock()\n        self._tqdm = tqdm\n\n    def tick(self):\n        with self._tick_mutex:\n            return self.__tick()\n\n    def __tick(self):\n        value = next(self._iter_cnt)\n        if self._tqdm is not None:\n            self._tqdm.update(1)\n        if value >= self._target:\n            self._event.set()\n            if self._tqdm is not None:\n                self._tqdm.close()\n        return value\n\n    def is_set(self):\n        return self._event.is_set()\n\n    def clear(self):\n        self._event.clear()\n\n    def wait(self, timeout=None):\n        return self._event.wait(timeout=timeout)\n\n\nclass CounterBasedMonitor(object):\n    _displayer = None\n\n    def __init__(self, counters=None, display_names=None, interval=1, printf=None):\n        if counters is None:\n            counters = ['DEFAULT']\n\n        self._display_names = display_names\n        self._counters = collections.OrderedDict([(n, Counter()) for n in counters])\n        self._interval = interval\n        self._printf = printf\n\n        if self._printf is None:\n            from jacinle.logging import get_logger\n            logger = get_logger(__file__)\n            self._printf = logger.info\n\n    @property\n    def _counter_names(self):\n        return list(self._counters.keys())\n\n    def tick(self, name=None):\n        if len(self._counter_names) == 1:\n            self._counters[self._counter_names[0]].tick()\n        else:\n            assert name is not None, 'Must provide name if there are multiple counters.'\n            self._counters[name].tick()\n\n    def start(self):\n        self._displayer = threading.Thread(target=self._display_thread, daemon=True)\n        self._displayer.start()\n        return self\n\n    def _display(self, deltas, interval):\n        names = self._display_names or self._counter_names\n        if len(names) == 1:\n            self._printf('Counter monitor {}: {} 
ticks/s.'.format(names[0], deltas[0]/interval))\n else:\n log_strs = ['Counter monitor:']\n for n, v in zip(names, deltas):\n log_strs.append('\\t{}: {} ticks/s'.format(n, v/interval))\n self._printf('\\n'.join(log_strs))\n\n def _display_thread(self):\n prev = [c.get() for _, c in self._counters.items()]\n while True:\n time.sleep(self._interval)\n curr = [c.get() for _, c in self._counters.items()]\n deltas = [c - p for p, c in zip(prev, curr)]\n prev = curr\n self._display(deltas, self._interval)\n","repo_name":"vacancy/Jacinle","sub_path":"jacinle/concurrency/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"16"} +{"seq_id":"10087258002","text":"if __name__ == \"__main__\":\n n, k = map(int, input().split())\n n_list = list(map(int, input().split()))\n\n sum_list = [0, n_list[0], n_list[0] + n_list[1]]\n\n for i in range(2, n):\n sum_list.append(sum_list[i] + n_list[i])\n\n subsum_list = []\n for i in range(k, n + 1):\n subsum_list.append(sum_list[i] - sum_list[i - k])\n print(max(subsum_list))","repo_name":"vhzpt109/Algorithm","sub_path":"BOJ/2559_수열.py","file_name":"2559_수열.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4802457003","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='XBRLi_converter', \n version='1.3',\n author=\"Filippo Ciceri\",\n author_email=\"filippo.ciceri@gmail.com\",\n description=\"Convert XBRLi documents into simple XBRL\",\n py_modules=['XBRLi_converter'],\n package_dir={'':'src'},\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/filippo-ciceri/XBRLI_converter\",\n dependency_links=['www.financialdatatree.com/download/XBRLI_converter'],\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires = [\n \"bs4\",\n \"lxml\"\n ],\n )\n","repo_name":"filippo-ciceri/XBRLI_converter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"70060480968","text":"import torch\nfrom torch.autograd import Variable,grad\ndef make_trainable(model, val):\n for p in model.parameters():\n p.requires_grad = val\n\n\ndef calc_gradient_penalty(netD, real_data, fake_data,LAMBDA=10):\n BATCH=real_data.size()[0]\n alpha = torch.rand(BATCH, 1)\n #print(alpha.size(),real_data.size())\n alpha = alpha.unsqueeze(-1).unsqueeze(-1).expand(real_data.size())\n alpha = alpha.cuda()\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n interpolates = interpolates.cuda()\n interpolates = Variable(interpolates, requires_grad=True)\n\n disc_interpolates = netD(interpolates)\n\n gradients = grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size()).cuda(),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA\n return gradient_penalty\n\ndef Gram(x):\n (bs, ch, h, w) = x.size()\n f = x.view(bs, ch, w*h)\n f_T = f.transpose(1, 2)\n G = f.bmm(f_T) / (ch * h * w)\n return G\n\n\n\ndef 
get_content_features(Vgg_net,img,mask):\n img=(img+1)*127.5\n img=img*((mask+1)/2)\n content_features = Vgg_net(img)[1]\n return content_features\n\ndef get_style_features(Vgg_net,img,mask):\n img=(img+1)*127.5\n img=img*((mask+1)/2)\n style_features = Vgg_net(img)\n # style_gram = [gram(fmap) for fmap in style_features]\n #style_feature_x = {}\n #style_feature_y = {}\n style_feature = {}\n for idx, feature in enumerate(style_features):\n #feature_x = feature[:, :, 1:, :] - feature[:, :, :-1, :]\n #feature_y = feature[:, :, :, 1:] - feature[:, :, :, :-1]\n #gram_x = Gram(feature_x)\n #gram_y = Gram(feature_y)\n gram = Gram(feature)\n #style_feature_x[idx] = gram_x\n #style_feature_y[idx] = gram_y\n style_feature[idx] = gram\n return style_feature\n\ndef get_style_loss(style_feature,fake_style_feature):\n style_loss=0.0\n for i in range(4):\n coff=float(1.0/4)\n fake_gram=fake_style_feature[i]\n style_gram=style_feature[i]\n style_loss+=coff*torch.mean(torch.abs((fake_gram-style_gram)))\n style_loss=torch.mean(style_loss)\n return style_loss\n\ndef get_content_loss(content_feature_real,content_feature_fake):\n coff=1\n content_loss=coff*torch.mean(torch.abs(content_feature_fake-content_feature_real))\n return content_loss\n\ndef get_tv_loss(img):\n x = torch.mean(torch.abs(img[:, :, 1:, :] - img[:, :, :-1, :]))\n y = torch.mean(torch.abs(img[:, :, :, 1:] - img[:, :, :, :-1]))\n return x+y\n","repo_name":"guyuchao/Fila-sGAN-pytorch","sub_path":"gycutils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"} +{"seq_id":"28277928391","text":"from turtle import Screen\nfrom snake import Snake\nfrom food import Food\nfrom scoreboard import ScoreBoard\nimport time\n\n# Screen setup\nscreen = Screen()\nscreen.setup(600, 600)\nscreen.bgcolor(\"black\")\nscreen.title(\"Snake Game\")\nscreen.tracer(0)\n\nsnake = Snake()\nfood = Food()\nscoreboard = ScoreBoard()\n\n# Makes snake move with keys\nscreen.listen()\nscreen.onkey(snake.up, \"Up\")\nscreen.onkey(snake.down, \"Down\")\nscreen.onkey(snake.left, \"Left\")\nscreen.onkey(snake.right, \"Right\")\n\n# Starts scoreboard\nscoreboard.update_scoreboard()\n\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n\n snake.snake_move()\n\n # Detect collision with food and adds score\n if snake.head.distance(food) < 15:\n food.new_random_position()\n scoreboard.count_score()\n snake.extend()\n\n # Detect collision with wall\n if snake.head.xcor() > 280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:\n scoreboard.reset_scoreboard()\n snake.reset_snake()\n\n # Detect collision with tail\n for segment in snake.segments[1:]:\n if snake.head.distance(segment) < 10:\n scoreboard.reset_scoreboard()\n snake.reset_snake()\n\n\nscreen.exitonclick()\n","repo_name":"implaymo/snake-game-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4259754981","text":"import sys\n\ndef main():\n\ts = set()\n\tanswer = 0\n\tline_number = 0\n\tfor line in sys.stdin:\n\t\tline_number += 1\n\t\tline = line.strip()\n\t\tif line:\n\t\t\tfor c in line:\n\t\t\t\tif not (97 <= ord(c) <= 122):\n\t\t\t\t\tsys.exit('Line {} doesn\\'t match pattern!'.format(line_number))\n\t\t\t\ts.add(c)\n\t\telse:\n\t\t\tanswer += len(s)\n\t\t\ts.clear()\n\tanswer += 
len(s)\n\tprint(answer)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"nightjuggler/aoc","sub_path":"2020/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70588614088","text":"from keras.models import Sequential\r\nfrom keras.layers import LSTM,Dropout,Dense\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n# %matplotlib inline\r\n\r\nfrom matplotlib.pylab import rcParams\r\nrcParams['figure.figsize']=20,10\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom keras import models\r\n\r\nscaler=MinMaxScaler(feature_range=(0,1))\r\ndf_nse = pd.read_csv(\"acc.csv\")\r\ndf_nse[\"Date\"]=pd.to_datetime(df_nse.Date,format=\"%Y-%m-%d\")\r\ndf_nse.index=df_nse['Date']\r\ndata=df_nse.sort_index(ascending=True,axis=0)\r\nnew_data=pd.DataFrame(index=range(0,len(df_nse)),columns=['Date','wap'])\r\nfor i in range(0,len(data)):\r\n new_data[\"Date\"][i]=data['Date'][i]\r\n new_data[\"wap\"][i]=data[\"wap\"][i]\r\nnew_data.index=new_data.Date\r\nnew_data.drop(\"Date\",axis=1,inplace=True)\r\ndataset=new_data.values\r\ntrain=dataset[0:750,:]\r\nvalid=dataset[750:878,:]\r\nscaler=MinMaxScaler(feature_range=(0,1))\r\nscaled_data=scaler.fit_transform(dataset)\r\nx_train,y_train=[],[]\r\nfor i in range(60,len(train)):\r\n x_train.append(scaled_data[i-60:i,0])\r\n y_train.append(scaled_data[i,0])\r\n \r\nx_train,y_train=np.array(x_train),np.array(y_train)\r\nx_train=np.reshape(x_train,(x_train.shape[0],x_train.shape[1],1))\r\nmodel=models.load_model(\"saved_model.h5\")\r\ninputs=new_data[len(new_data)-len(valid)-60:].values\r\ninputs=inputs.reshape(-1,1)\r\ninputs=scaler.transform(inputs)\r\nX_test=[]\r\nfor i in range(60,inputs.shape[0]):\r\n X_test.append(inputs[i-60:i,0])\r\nX_test=np.array(X_test)\r\nX_test=np.reshape(X_test,(X_test.shape[0],X_test.shape[1],1))\r\nclosing_price=model.predict(X_test)\r\nclosing_price=scaler.inverse_transform(closing_price)\r\ntrain=new_data[:750]\r\nvalid=new_data[750:878]\r\ndate = df_nse['Date']\r\ndate = date.tolist()\r\nExpiry_date = df_nse['Expiry_date']\r\nExpiry_date = Expiry_date.tolist()\r\nlot_size = df_nse['lot_size']\r\nlot_size = lot_size.tolist()\r\nvalid['Predictions']=closing_price\r\nvalid['Date'] = date[750:878]\r\nvalid['Expiry_date']=Expiry_date[750:878]\r\nvalid['lot_size']=lot_size[750:878]\r\nplt.figure(figsize=(16,8))\r\n# print(valid[['wap',\"Predictions\",\"Date\",'Expiry_date','lot_size']])\r\nplt.plot(valid[['wap',\"Predictions\"]])\r\nvalid = valid.values.tolist()\r\n\r\n\r\n# print(valid)\r\ncount=0\r\nlast_expiry = str(valid[0][3])\r\navg = []\r\namt=0\r\nfor i in range(len(valid)):\r\n curr_expiry = str(valid[i][3])\r\n if curr_expiry!=last_expiry:\r\n last_expiry=curr_expiry\r\n avg.append(round(amt/count,2))\r\n amt=int(valid[i][1])\r\n count=0\r\n else:\r\n amt=amt+int(valid[i][1])\r\n count=count+1\r\nprint(avg)\r\n\r\n\r\ncount=0\r\ncounter=0\r\nsell = 0\r\nbuy = 0\r\ntotal_selling_price = 0\r\ntotal_buying_price = 0\r\nlast_expiry = str(valid[0][3])\r\n# for i in range(len(valid)):\r\n# curr_expiry = str(valid[i][3])\r\n# print(valid[i][1])\r\n# if curr_expiry!=last_expiry:\r\n# last_expiry=curr_expiry\r\n# counter=counter+1\r\n# print(counter)\r\n# break\r\n# if counter>=len(avg):\r\n# counter=len(avg)-1\r\n# if (int(valid[i][1])>avg[counter]):\r\n\r\n# plt.plot(valid[i][2], int(valid[i][1]), marker='v', color=\"red\")\r\n# # print(\"On \" + 
str(valid[i][2]) + \" Sell signal\")\r\n \r\n# sell=sell+1\r\n# total_selling_price=total_selling_price+round(abs(avg[counter]-int(valid[i][1])),2)*int(valid[i][4])\r\n# else:\r\n# plt.plot(valid[i][2], int(valid[i][1]), marker='v', color=\"green\")\r\n \r\n# buy=buy+1\r\n# total_buying_price=total_buying_price+round(avg[counter]-int(valid[i][1]),2)*int(valid[i][4])\r\n\r\n\r\n\r\ns_max=0\r\ns_min=276085842\r\nfor i in range(len(valid)):\r\n curr_expiry = str(valid[i][3])\r\n # print(valid[i][1])\r\n if curr_expiry!=last_expiry:\r\n last_expiry=curr_expiry\r\n total_selling_price=total_selling_price+(s_max-avg[counter])*int(valid[i][4])\r\n total_buying_price=total_buying_price+(avg[counter]-s_min)*int(valid[i][4])\r\n counter=counter+1\r\n # print(s_min,s_max)\r\n s_max=0\r\n s_min=276085842\r\n \r\n if counter>=len(avg):\r\n counter=len(avg)-1\r\n if (int(valid[i][1])>s_max):\r\n s_max=int(valid[i][1])\r\n \r\n if (int(valid[i][1])=len(avg):\r\n counter=len(avg)-1\r\n if (int(valid[i][0])>s_max):\r\n s_max=int(valid[i][0])\r\n \r\n if (int(valid[i][0])(buying_price*lot_size):\r\n# print(\"On \" + str(valid[i][2]) + \" Profit is \" + str(round((((valid[i][1])*lot_size)-(buying_price*lot_size))/100,2)) + \" % \")\r\n# else:\r\n# print(\"On \" + str(valid[i][2]) + \" Loss is \" + str(round(abs(((valid[i][1])*lot_size)-(buying_price*lot_size))/100,2)) + \" % \")","repo_name":"BrooCode/trading","sub_path":"min_max_profit.py","file_name":"min_max_profit.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72385384007","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nfrom torchworker import TorchWorker\nfrom inpaintingworker import InpaintingWorker\nfrom PIL import Image\nimport base64\nfrom io import BytesIO, StringIO\nimport logging\nimport sys\nfrom flask_lt import run_with_lt\nimport argparse\nfrom imageworker import encode_image, decode_image\n\nDEBUG = False\nWORKER = None\n\n# Define the main app and api\napp = Flask(__name__)\napi = Api(app)\n\n# define a parser for the arguments to the api\nreq_parser = reqparse.RequestParser()\n# parser.add_argument(\"prompt\", type=str, help=\"The prompt to execute\", required=True)\nreq_parser.add_argument(\"steps\", type=int, help=\"The number of steps to execute\", default=50)\nreq_parser.add_argument(\"size\", type=int, help=\"The size of the finished image, size x size\", default=512)\nreq_parser.add_argument(\"seed\", type=int, help=\"The seed to initialize the random generator\", default=42)\nreq_parser.add_argument(\"image\", help=\"Base64 encoded input image, optional\")\nreq_parser.add_argument(\"strength\", help=\"The strength of an input image\", default=0.5)\n\ndef create_image_filename(prompt, seed):\n return f\"{prompt.replace(' ', '_')}_{seed}_.png\"\n \ndef inpaint_image(prompt, image, steps=10, seed=42, debug=False, strength=0.5):\n if debug:\n image = Image.open(\"./images/debug_image.png\")\n else:\n image = WORKER.execute_prompt(image, prompt, seeds=[seed], steps=steps, strength=strength)[0]\n \n return encode_image(image)\n \ndef generate_image(prompt, steps=10, seed=42, debug=False, size=512):\n if debug:\n image = Image.open(\"./images/debug_image.png\")\n \n else:\n image = WORKER.execute_prompt(prompt, steps=steps, seeds=[seed], size=size)[0]\n \n return encode_image(image)\n \nclass Inpaintination(Resource):\n def put(self, prompt):\n args = req_parser.parse_args()\n 
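# the prompt arrives via the URL path; record it alongside the parsed options\n        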
args['prompt'] = prompt\n \n # actually parse the input image\n args['image'] = decode_image(args['image'])\n args['image'] = inpaint_image(prompt, args['image'], steps=args['steps'], seed=args['seed'], debug=DEBUG, strength=args['strength'])\n args['filename'] = create_image_filename(prompt, args['seed'])\n return args\n\nclass Imagination(Resource):\n def get(self, prompt):\n args = req_parser.parse_args(strict=True)\n return args\n\n def put(self, prompt):\n args = req_parser.parse_args()\n args['prompt'] = prompt\n args['image'] = generate_image(prompt, steps=args['steps'], seed=args['seed'], debug=DEBUG, size=args['size'])\n args['filename'] = create_image_filename(prompt, args['seed'])\n return args\n\n# Add endpoint for our prompt resource\napi.add_resource(Imagination, '/')\napi.add_resource(Inpaintination, '/inpaint/')\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--debug', help=\"Run server in debug mode\", default=False, action='store_true')\n parser.add_argument('-lt', help=\"Run server with localtunnel\", default=False, action='store_true')\n parser.add_argument('-p', '--inpaint', help=\"Use inpainting Api\", default=False, action='store_true')\n args = parser.parse_args()\n \n # any cmd arg is explicity parsed as running in debug mode\n if args.debug:\n DEBUG = True\n else:\n if args.inpaint:\n WORKER = InpaintingWorker()\n else:\n WORKER = TorchWorker()\n\n if args.lt:\n run_with_lt(app, \"Lando\")\n\n app.run(debug=DEBUG)","repo_name":"DerLando/imaginepy","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35650454519","text":"from __future__ import print_function\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.special import erf\n\n\ndef create_absorption_line(bin_size, max_line_width, line_offset, area):\n \"\"\"\n\n :param bin_size: bin size in wavelength units\n :param max_line_width: extents of the grid in wavelength units (on each side)\n :param line_offset: amount in wavelength units to shift the peak from 0\n :param area: profile area in wavelength units (* dimensionless flux)\n :return:\n \"\"\"\n #\n\n # create an array large enough to reach at least max_line_width on both sides.\n ar_erf_wavelength = np.arange(0, 2 * max_line_width + 2 * bin_size, bin_size)\n # choose limits such that after numeric differentiation they are centered on 0.\n ar_erf_wavelength = ar_erf_wavelength - ar_erf_wavelength[ar_erf_wavelength.size // 2 - 1] - bin_size / 2\n # compute the error function\n ar_erf = erf(ar_erf_wavelength / area - line_offset) * area\n # differentiate the error function to get an area preserving gaussian.\n # after differentiation the wavelength values are shifted.\n ar_diff_wavelength = ar_erf_wavelength[:-1] + bin_size / 2\n ar_gaussian_profile = np.diff(ar_erf) / 2 / bin_size\n return ar_diff_wavelength, ar_gaussian_profile\n\n\ntest_bin_size = 0.64\n# max_line_width should be far enough so that the area of the gaussian outside the boundary is 0.\n# note that line_offset should probably be 0 for our use.\n\nar_wavelength, ar_profile = create_absorption_line(\n bin_size=test_bin_size, max_line_width=10, line_offset=5.0, area=0.0654)\n# verify that the area under the gaussian is correct\nprint(\"area:\", ar_profile.sum() * test_bin_size)\nplt.plot(ar_wavelength, 1 - 
ar_profile)\nplt.show()\n","repo_name":"elaav/Astro","sub_path":"Yishay/physics_functions/absorption_line_helper.py","file_name":"absorption_line_helper.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"72286732487","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import GameSerializer\nfrom market.models import Game\n\n\n@api_view(['GET'])\ndef getRoutes(request):\n routes = [\n {'GET': 'api/games'},\n {'GET': 'api/games/id'},\n {'GET': 'api/games/id/review'},\n\n {'POST': 'api/users/token'},\n {'POST': 'api/users/token/refresh'},\n ]\n\n return Response(routes)\n\n\n@api_view(['GET'])\ndef getGames(request):\n games = Game.objects.all()\n serializer = GameSerializer(games, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef getGame(request, pk):\n games = Game.objects.get(id=pk)\n serializer = GameSerializer(games, many=False)\n return Response(serializer.data)","repo_name":"andreiec/django-game-store","sub_path":"gamestore/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31299570513","text":"from fastapi import FastAPI, BackgroundTasks\r\nfrom workers.project_manager import ProjectManager\r\nfrom pydantic import BaseModel\r\nfrom typing import Optional\r\nfrom time import sleep\r\n\r\napp = FastAPI()\r\n\r\n# update target channel scraping\r\n# @app.get(\"/scraping-start\")\r\n# async def scraping_with_target_date(Background_tasks : BackgroundTasks):\r\n# def run():\r\n# while True:\r\n# manager = ProjectManager()\r\n# message = manager.job_init(target=True)\r\n# sleep(7200) # 2 hours\r\n# Background_tasks.add_task(run)\r\n\r\n# lagacy total channel scraping\r\n@app.get(\"/scraping-start\")\r\nasync def scraping_with_target_date(Background_tasks : BackgroundTasks):\r\n def run():\r\n while True:\r\n manager = ProjectManager()\r\n message = manager.job_init()\r\n sleep(43200) # 12 hours\r\n Background_tasks.add_task(run)\r\n\r\n@app.get('/')\r\ndef main():\r\n return 'chancewave scraper'\r\n\r\nclass INITIAL_PROCESS_SOURCE(BaseModel):\r\n channel_code : Optional[str] = ''\r\n pagesize : int = 10\r\n page : int = 0\r\n\r\n@app.post('/get-channel-data')\r\nasync def get_channel_data(SOURCE:INITIAL_PROCESS_SOURCE):\r\n channel_code = SOURCE.channel_code\r\n count = SOURCE.pagesize\r\n page = SOURCE.page\r\n manager = ProjectManager()\r\n data = manager.get_data(channel_code, count, page)\r\n return data\r\n\r\n# @app.post('/get-total-channel-data')\r\n# async def get_total_data(SOURCE :INITIAL_PROCESS_SOURCE):\r\n# channel_code = SOURCE.channel_code\r\n# count = SOURCE.count\r\n# manager = ProjectManager()\r\n# data = manager.get_data(channel_code, count)\r\n# return data\r\n\r\nclass DEV_SOURCE(BaseModel):\r\n channel_code : str\r\n\r\nfrom datetime import datetime\r\n\r\n@app.post('/dev-test')\r\nasync def test(DEV_SOURCE :DEV_SOURCE):\r\n now = datetime.now()\r\n channel_code = DEV_SOURCE.channel_code\r\n manager = ProjectManager()\r\n manager.scraping_dev_test(channel_code)\r\n print(datetime.now() - now)","repo_name":"choiseulong/chancewave_scraping","sub_path":"scrapingProject/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"20795297959","text":"import datetime as dt\nfrom flask import current_app\nfrom flask_mongoengine import MongoEngine\nimport flask_jwt_simple as jwt\n\nfrom flaskr._users import Users\n\n\n# DATABASE ---------------------------------------------------------------------\nDB = MongoEngine()\n\n# JWT AUTHORIZATION ------------------------------------------------------------\nJWT = jwt.JWTManager()\n\n@JWT.jwt_data_loader\ndef add_claims_to_access_token(identity):\n user = Users.objects(username=identity).first()\n\n now = dt.datetime.utcnow()\n return {\n 'exp': now + current_app.config['JWT_EXPIRES'],\n 'iat': now,\n 'nbf': now,\n 'sub': {\n 'identity': identity,\n 'email': user.email,\n 'is_admin': user.is_admin\n }\n }\n","repo_name":"Sarctiann/ACIN","sub_path":"flaskr/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8609831898","text":"import yaml\nimport json\nimport urllib\nimport urllib.request as urllib2\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\nfrom kafka import KafkaProducer\nfrom kafka.errors import KafkaError\nimport time\nfrom time import sleep\n#docker run -d --name kafka -p 9999:9999 -p 9092:9092 --env KAFKA_ADVERTISED_HOST_NAME=kafka -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 --network kafka-net XXXXXXXXXXXXXXXXXXXXXXXXXXX\n#create the object, assign it to a variable\ncount = 0\nnbitems = 10260\nnbseconds = 3600\nlastcount = 0\ntopicName = 'rss-flow'\nmillis = time.time() * 1000.0\nproducer = KafkaProducer(bootstrap_servers=['kafka:9092'])\nwith open(\"content.rss\") as stream:\n for line in stream:\n xmlString=json.loads(line)\n if xmlString :\n try: \n tree = etree.fromstring(bytes(bytearray(xmlString, encoding='iso-8859-1')))\n for channel in tree.xpath(\"/rss/channel\"):\n titleChannel=channel.xpath(\"title\")[0].text\n descriptionChannel=channel.xpath(\"title\")[0].text\n for item in channel.xpath(\"item\"):\n try:\n data = {}\n data['title']=item.xpath(\"title\")[0].text\n if item.xpath('pubDate'):\n data['date']=item.xpath(\"pubDate\")[0].text\n if item.xpath(\"description\"):\n data['description']=item.xpath(\"description\")[0].text\n if item.xpath(\"creator\"):\n data['author']=item.xpath(\"creator\")[0].text\n if item.xpath(\"author\"):\n data['author']=item.xpath(\"author\")[0].text\n if item.xpath(\"category\"):\n categories=[]\n for cat in item.xpath(\"category\"):\n categories.append(cat.text)\n data['categories']=categories\n if count-lastcount >= nbitems/nbseconds :\n sleeper=time.time() * 1000.0-millis\n print(\"send \"+ str(count-lastcount) +\" messages. 
\"+str(count)+\"/\"+str(nbitems), flush=True)\n if sleeper>0 :\n sleep((1000-sleeper)/1000.0)\n millis = time.time() * 1000\n lastcount=count\n producer.flush()\n count=count+1\n producer.send(topicName, str.encode(json.dumps(data)))\n except Exception as e :\n print (\"Error: \",e)\n except Exception as e :\n print (\"Error: \",e)\nprint (\"nb:\"+str(count))\nproducer.close() \n \n \n\n","repo_name":"buidiaconseil/DemoKafkaAkkaSpark","sub_path":"pushRSS.py","file_name":"pushRSS.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"28622915598","text":"#!/usr/bin/env python3.7\nimport os\nfrom sys import exit\ntry:\n import numpy\n import matplotlib.pyplot as plt\nexcept ModuleNotFoundError:\n print(\"Use 'pip3 install numpy; pip3 install matplotlib' first.\")\n exit(1)\n\n\ndef read_data(file):\n try:\n with open(file, 'r') as file:\n data = []\n data.append([int(line.strip('\\n').strip(' ').split(' ')[1]) for line in file])\n file.seek(0)\n data.append([line.strip('\\n').strip(' ').split(' ')[0] for line in file])\n return data\n except FileNotFoundError:\n print('Where the hell is the data file? :O')\n print(f'Given file path: {file}')\n exit(1)\n\n\ndef get_data(data_file):\n file = os.path.join(os.path.dirname(__file__), f'{data_file}')\n data = read_data(file)\n return data\n\n\ndef make_calculations(data):\n mean = numpy.mean(data)\n std_dev = numpy.std(data)\n return [mean, std_dev]\n\n\ndef plot(data):\n tasks = data[0]\n time = [date[5:10] for date in data[1]]\n\n mean, std_dev = make_calculations(tasks)\n\n green_range = (mean - std_dev, mean + std_dev)\n red_range = (mean - 2 * std_dev, mean + 2 * std_dev)\n\n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n\n ax.plot(time, tasks, '-', color='black', linewidth=1)\n ax.scatter(time, tasks, color='black', marker='o', s=5)\n ax.plot([mean for i in time], color='lightblue', linewidth=1)\n ax.plot([green_range for i in time], color='green', linewidth=1)\n ax.plot([red_range for i in time], color='red', linewidth=1)\n ax.set(title=\"Tasks per day\", xlabel=\"days\", ylabel=\"numer of tasks\")\n plt.show()\n\n\ndef main():\n data = get_data('tasks_stat.txt')\n plot(data)\n\n\nif __name__ == '__main__':\n main()\n\n# axis x - capital numbers\n","repo_name":"mateuszkanabrocki/Projects","sub_path":"run_charts/project/tasks_chart.py","file_name":"tasks_chart.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28733420801","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport csv\n\n\ndef on_click():\n idx = ent_name.get()\n with open('slambook.csv', 'r') as f:\n data = f.readlines()\n for d in data:\n if d.find(str(idx)) != -1:\n text.insert(INSERT, d)\n break\n\n\ndef to_delete():\n lines = list()\n members = ent_name.get()\n with open('slambook.csv', 'r') as readFile:\n reader = csv.reader(readFile)\n for row in reader:\n lines.append(row)\n for field in row:\n if field == members:\n lines.remove(row)\n with open('slambook.csv', 'w', newline='') as writeFile:\n\n writer = csv.writer(writeFile)\n writer.writerows(lines)\n messagebox.askyesno(\"Option\", \"Do you want to DELETE\")\n root.destroy()\n\n\nroot = Tk()\nroot.geometry(\"1500x1200\")\nroot.configure(background=\"yellow\")\n\nLabel(root, text=\"Enter Name :\", font=(\"Arial Bold\", 28), bg='black', fg='white').place(x=300, 
y=130)\nLabel(root, text=\"(in caps)\", font=(\"Arial italic\", 15),bg=\"yellow\").place(x=450, y=180)\nLabel(root, text=\"Output : \", font=(\"Arial Bold\", 28), bg='black', fg='white').place(x=300, y=250)\n\n# entry box\nent_name = ttk.Entry(root, font=(\"helvetica\", 20, \"bold\"), justify=CENTER)\nent_name.pack(anchor=\"ne\", padx=300, pady=140)\n\n# buttons\nsearch = Button(root, text=\"Search\", width=15, height=1, font=(\"Arial Bold\", 12),\n bg='black', fg='white', command=on_click).place(x=500, y=500)\nback = Button(root, text=\"Back\", width=15, height=1, font=(\"Arial Bold\", 12), bg='black', fg='white',\n command=root.destroy).place(x=900, y=500)\n\ndelete = Button(root, text=\"Delete\", width=15, height=1, font=(\"Arial Bold\", 12), bg='black', fg='white',\n command=to_delete).place(x=700, y=500)\n# text box\ntext = Text(root, width=150, height=6)\ntext.pack(anchor=\"se\", padx=320, pady=20)\nroot.mainloop()\n","repo_name":"alaskSs/Slambook_GUI","sub_path":"delete_gui.py","file_name":"delete_gui.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17693770995","text":"from collections import OrderedDict\nfrom typing import Dict, Sequence\n\nfrom .. import Provider as PersonProvider\n\n\n# See transliteration table https://en.wikipedia.org/wiki/Romanization_of_Russian#Transliteration_table\ndef translit(text: str) -> str:\n translit_dict: Dict[str, str] = {\n \"а\": \"a\",\n \"б\": \"b\",\n \"в\": \"v\",\n \"г\": \"g\",\n \"д\": \"d\",\n \"е\": \"e\",\n \"ё\": \"e\",\n \"ж\": \"zh\",\n \"з\": \"z\",\n \"и\": \"i\",\n \"й\": \"y\",\n \"к\": \"k\",\n \"л\": \"l\",\n \"м\": \"m\",\n \"н\": \"n\",\n \"о\": \"o\",\n \"п\": \"p\",\n \"р\": \"r\",\n \"с\": \"s\",\n \"т\": \"t\",\n \"у\": \"u\",\n \"ф\": \"f\",\n \"х\": \"kh\",\n \"ц\": \"ts\",\n \"ч\": \"ch\",\n \"ш\": \"sh\",\n \"щ\": \"shch\",\n \"ъ\": \"\",\n \"ы\": \"y\",\n \"ь\": \"\",\n \"э\": \"e\",\n \"ю\": \"yu\",\n \"я\": \"ya\",\n \"А\": \"A\",\n \"Б\": \"B\",\n \"В\": \"V\",\n \"Г\": \"G\",\n \"Д\": \"D\",\n \"Е\": \"Ye\",\n \"Ë\": \"E\",\n \"Ж\": \"Zh\",\n \"З\": \"Z\",\n \"И\": \"I\",\n \"Й\": \"Y\",\n \"К\": \"K\",\n \"Л\": \"L\",\n \"М\": \"M\",\n \"Н\": \"N\",\n \"О\": \"O\",\n \"П\": \"P\",\n \"Р\": \"R\",\n \"С\": \"S\",\n \"Т\": \"T\",\n \"У\": \"U\",\n \"Ф\": \"F\",\n \"Х\": \"Kh\",\n \"Ц\": \"Ts\",\n \"Ч\": \"Ch\",\n \"Ш\": \"Sh\",\n \"Щ\": \"Shch\",\n \"Ы\": \"Y\",\n \"Э\": \"E\",\n \"Ю\": \"Yu\",\n \"Я\": \"Ya\",\n }\n for letter in text:\n if letter.isalpha():\n text = text.replace(letter, translit_dict[letter])\n return text\n\n\nclass Provider(PersonProvider):\n formats_male: Dict[str, float] = OrderedDict(\n (\n (\"{{last_name_male}} {{first_name_male}} {{middle_name_male}}\", 0.49),\n (\"{{first_name_male}} {{middle_name_male}} {{last_name_male}}\", 0.49),\n (\n \"{{prefix_male}} {{last_name_male}} {{first_name_male}} {{middle_name_male}}\",\n 0.02,\n ),\n )\n )\n\n formats_female: Dict[str, float] = OrderedDict(\n (\n (\"{{last_name_female}} {{first_name_female}} {{middle_name_female}}\", 0.49),\n (\"{{first_name_female}} {{middle_name_female}} {{last_name_female}}\", 0.49),\n (\n \"{{prefix_female}} {{last_name_female}} {{first_name_female}} {{middle_name_female}}\",\n 0.02,\n ),\n )\n )\n\n # Using random_element's dictionary weighting means that the\n # formats = formats_male + formats_female\n # has to be replaced with something dict and python 2.x compatible\n\n formats: Dict[str, float] = 
formats_male.copy()\n formats.update(formats_female)\n\n first_names_male = (\n \"Август\",\n \"Авдей\",\n \"Аверкий\",\n \"Аверьян\",\n \"Авксентий\",\n \"Автоном\",\n \"Агап\",\n \"Агафон\",\n \"Аггей\",\n \"Адам\",\n \"Адриан\",\n \"Азарий\",\n \"Аким\",\n \"Александр\",\n \"Алексей\",\n \"Амвросий\",\n \"Амос\",\n \"Ананий\",\n \"Анатолий\",\n \"Андрей\",\n \"Андрон\",\n \"Андроник\",\n \"Аникей\",\n \"Аникита\",\n \"Анисим\",\n \"Антип\",\n \"Антонин\",\n \"Аполлинарий\",\n \"Аполлон\",\n \"Арефий\",\n \"Аристарх\",\n \"Аркадий\",\n \"Арсений\",\n \"Артемий\",\n \"Артем\",\n \"Архип\",\n \"Аскольд\",\n \"Афанасий\",\n \"Афиноген\",\n \"Бажен\",\n \"Богдан\",\n \"Болеслав\",\n \"Борис\",\n \"Борислав\",\n \"Боян\",\n \"Бронислав\",\n \"Будимир\",\n \"Вадим\",\n \"Валентин\",\n \"Валерий\",\n \"Валерьян\",\n \"Варлаам\",\n \"Варфоломей\",\n \"Василий\",\n \"Вацлав\",\n \"Велимир\",\n \"Венедикт\",\n \"Вениамин\",\n \"Викентий\",\n \"Виктор\",\n \"Викторин\",\n \"Виссарион\",\n \"Виталий\",\n \"Владилен\",\n \"Владлен\",\n \"Владимир\",\n \"Владислав\",\n \"Влас\",\n \"Всеволод\",\n \"Всемил\",\n \"Всеслав\",\n \"Вышеслав\",\n \"Вячеслав\",\n \"Гаврила\",\n \"Галактион\",\n \"Гедеон\",\n \"Геннадий\",\n \"Георгий\",\n \"Герасим\",\n \"Герман\",\n \"Глеб\",\n \"Гордей\",\n \"Гостомысл\",\n \"Гремислав\",\n \"Григорий\",\n \"Гурий\",\n \"Давыд\",\n \"Данила\",\n \"Дементий\",\n \"Демид\",\n \"Демьян\",\n \"Денис\",\n \"Дмитрий\",\n \"Добромысл\",\n \"Доброслав\",\n \"Дорофей\",\n \"Евгений\",\n \"Евграф\",\n \"Евдоким\",\n \"Евлампий\",\n \"Евсей\",\n \"Евстафий\",\n \"Евстигней\",\n \"Егор\",\n \"Елизар\",\n \"Елисей\",\n \"Емельян\",\n \"Епифан\",\n \"Еремей\",\n \"Ермил\",\n \"Ермолай\",\n \"Ерофей\",\n \"Ефим\",\n \"Ефрем\",\n \"Захар\",\n \"Зиновий\",\n \"Зосима\",\n \"Иван\",\n \"Игнатий\",\n \"Игорь\",\n \"Измаил\",\n \"Изот\",\n \"Изяслав\",\n \"Иларион\",\n \"Илья\",\n \"Иннокентий\",\n \"Иосиф\",\n \"Ипат\",\n \"Ипатий\",\n \"Ипполит\",\n \"Ираклий\",\n \"Исай\",\n \"Исидор\",\n \"Казимир\",\n \"Каллистрат\",\n \"Капитон\",\n \"Карл\",\n \"Карп\",\n \"Касьян\",\n \"Ким\",\n \"Кир\",\n \"Кирилл\",\n \"Клавдий\",\n \"Климент\",\n \"Кондрат\",\n \"Кондратий\",\n \"Конон\",\n \"Константин\",\n \"Корнил\",\n \"Кузьма\",\n \"Куприян\",\n \"Лавр\",\n \"Лаврентий\",\n \"Ладимир\",\n \"Ладислав\",\n \"Лазарь\",\n \"Лев\",\n \"Леон\",\n \"Леонид\",\n \"Леонтий\",\n \"Лонгин\",\n \"Лука\",\n \"Лукьян\",\n \"Лучезар\",\n \"Любим\",\n \"Любомир\",\n \"Любосмысл\",\n \"Макар\",\n \"Максим\",\n \"Максимильян\",\n \"Мариан\",\n \"Марк\",\n \"Мартын\",\n \"Мартьян\",\n \"Матвей\",\n \"Мефодий\",\n \"Мечислав\",\n \"Милан\",\n \"Милен\",\n \"Милий\",\n \"Милован\",\n \"Мина\",\n \"Мир\",\n \"Мирон\",\n \"Мирослав\",\n \"Митофан\",\n \"Михаил\",\n \"Михей\",\n \"Модест\",\n \"Моисей\",\n \"Мокей\",\n \"Мстислав\",\n \"Назар\",\n \"Наркис\",\n \"Натан\",\n \"Наум\",\n \"Нестор\",\n \"Никандр\",\n \"Никанор\",\n \"Никита\",\n \"Никифор\",\n \"Никодим\",\n \"Николай\",\n \"Никон\",\n \"Нифонт\",\n \"Олег\",\n \"Олимпий\",\n \"Онуфрий\",\n \"Орест\",\n \"Осип\",\n \"Остап\",\n \"Остромир\",\n \"Павел\",\n \"Панкратий\",\n \"Панкрат\",\n \"Пантелеймон\",\n \"Панфил\",\n \"Парамон\",\n \"Парфен\",\n \"Пахом\",\n \"Петр\",\n \"Пимен\",\n \"Платон\",\n \"Поликарп\",\n \"Порфирий\",\n \"Потап\",\n \"Пров\",\n \"Прокл\",\n \"Прокофий\",\n \"Прохор\",\n \"Радим\",\n \"Радислав\",\n \"Радован\",\n \"Ратибор\",\n \"Ратмир\",\n \"Родион\",\n \"Роман\",\n \"Ростислав\",\n \"Рубен\",\n \"Руслан\",\n \"Рюрик\",\n 
\"Савва\",\n \"Савватий\",\n \"Савелий\",\n \"Самсон\",\n \"Самуил\",\n \"Светозар\",\n \"Святополк\",\n \"Святослав\",\n \"Севастьян\",\n \"Селиван\",\n \"Селиверст\",\n \"Семен\",\n \"Серафим\",\n \"Сергей\",\n \"Сигизмунд\",\n \"Сидор\",\n \"Сила\",\n \"Силантий\",\n \"Сильвестр\",\n \"Симон\",\n \"Сократ\",\n \"Соломон\",\n \"Софон\",\n \"Софрон\",\n \"Спартак\",\n \"Спиридон\",\n \"Станимир\",\n \"Станислав\",\n \"Степан\",\n \"Стоян\",\n \"Тарас\",\n \"Твердислав\",\n \"Творимир\",\n \"Терентий\",\n \"Тимофей\",\n \"Тимур\",\n \"Тит\",\n \"Тихон\",\n \"Трифон\",\n \"Трофим\",\n \"Ульян\",\n \"Устин\",\n \"Фадей\",\n \"Федор\",\n \"Федосий\",\n \"Федот\",\n \"Феликс\",\n \"Феоктист\",\n \"Феофан\",\n \"Ферапонт\",\n \"Филарет\",\n \"Филимон\",\n \"Филипп\",\n \"Фирс\",\n \"Флорентин\",\n \"Фока\",\n \"Фома\",\n \"Фортунат\",\n \"Фотий\",\n \"Фрол\",\n \"Харитон\",\n \"Харлампий\",\n \"Христофор\",\n \"Чеслав\",\n \"Эдуард\",\n \"Эммануил\",\n \"Эмиль\",\n \"Эраст\",\n \"Эрнест\",\n \"Эрнст\",\n \"Ювеналий\",\n \"Юлиан\",\n \"Юлий\",\n \"Юрий\",\n \"Яков\",\n \"Ян\",\n \"Якуб\",\n \"Януарий\",\n \"Ярополк\",\n \"Ярослав\",\n )\n\n first_names_female = (\n \"Агата\",\n \"Агафья\",\n \"Акулина\",\n \"Алевтина\",\n \"Александра\",\n \"Алина\",\n \"Алла\",\n \"Анастасия\",\n \"Ангелина\",\n \"Анжела\",\n \"Анжелика\",\n \"Анна\",\n \"Антонина\",\n \"Валентина\",\n \"Валерия\",\n \"Варвара\",\n \"Василиса\",\n \"Вера\",\n \"Вероника\",\n \"Виктория\",\n \"Галина\",\n \"Глафира\",\n \"Дарья\",\n \"Евгения\",\n \"Евдокия\",\n \"Евпраксия\",\n \"Евфросиния\",\n \"Екатерина\",\n \"Елена\",\n \"Елизавета\",\n \"Жанна\",\n \"Зинаида\",\n \"Зоя\",\n \"Иванна\",\n \"Ираида\",\n \"Ирина\",\n \"Ия\",\n \"Кира\",\n \"Клавдия\",\n \"Ксения\",\n \"Лариса\",\n \"Лидия\",\n \"Лора\",\n \"Лукия\",\n \"Любовь\",\n \"Людмила\",\n \"Майя\",\n \"Маргарита\",\n \"Марина\",\n \"Мария\",\n \"Марфа\",\n \"Милица\",\n \"Надежда\",\n \"Наина\",\n \"Наталья\",\n \"Нина\",\n \"Нинель\",\n \"Нонна\",\n \"Оксана\",\n \"Октябрина\",\n \"Олимпиада\",\n \"Ольга\",\n \"Пелагея\",\n \"Полина\",\n \"Прасковья\",\n \"Раиса\",\n \"Регина\",\n \"Светлана\",\n \"Синклитикия\",\n \"София\",\n \"Таисия\",\n \"Тамара\",\n \"Татьяна\",\n \"Ульяна\",\n \"Фаина\",\n \"Феврония\",\n \"Фёкла\",\n \"Элеонора\",\n \"Эмилия\",\n \"Юлия\",\n )\n\n first_names = first_names_male + first_names_female\n\n last_names_male = (\n \"Смирнов\",\n \"Иванов\",\n \"Кузнецов\",\n \"Попов\",\n \"Соколов\",\n \"Лебедев\",\n \"Козлов\",\n \"Новиков\",\n \"Морозов\",\n \"Петров\",\n \"Волков\",\n \"Соловьев\",\n \"Васильев\",\n \"Зайцев\",\n \"Павлов\",\n \"Семенов\",\n \"Голубев\",\n \"Виноградов\",\n \"Богданов\",\n \"Воробьев\",\n \"Федоров\",\n \"Михайлов\",\n \"Беляев\",\n \"Тарасов\",\n \"Белов\",\n \"Комаров\",\n \"Орлов\",\n \"Киселев\",\n \"Макаров\",\n \"Андреев\",\n \"Ковалев\",\n \"Ильин\",\n \"Гусев\",\n \"Титов\",\n \"Кузьмин\",\n \"Кудрявцев\",\n \"Баранов\",\n \"Куликов\",\n \"Алексеев\",\n \"Степанов\",\n \"Яковлев\",\n \"Сорокин\",\n \"Сергеев\",\n \"Романов\",\n \"Захаров\",\n \"Борисов\",\n \"Королев\",\n \"Герасимов\",\n \"Пономарев\",\n \"Григорьев\",\n \"Лазарев\",\n \"Медведев\",\n \"Ершов\",\n \"Никитин\",\n \"Соболев\",\n \"Рябов\",\n \"Поляков\",\n \"Цветков\",\n \"Данилов\",\n \"Жуков\",\n \"Фролов\",\n \"Журавлев\",\n \"Николаев\",\n \"Крылов\",\n \"Максимов\",\n \"Сидоров\",\n \"Осипов\",\n \"Белоусов\",\n \"Федотов\",\n \"Дорофеев\",\n \"Егоров\",\n \"Матвеев\",\n \"Бобров\",\n \"Дмитриев\",\n \"Калинин\",\n \"Анисимов\",\n 
\"Петухов\",\n \"Антонов\",\n \"Тимофеев\",\n \"Никифоров\",\n \"Веселов\",\n \"Филиппов\",\n \"Марков\",\n \"Большаков\",\n \"Суханов\",\n \"Миронов\",\n \"Ширяев\",\n \"Александров\",\n \"Коновалов\",\n \"Шестаков\",\n \"Казаков\",\n \"Ефимов\",\n \"Денисов\",\n \"Громов\",\n \"Фомин\",\n \"Давыдов\",\n \"Мельников\",\n \"Щербаков\",\n \"Блинов\",\n \"Колесников\",\n \"Карпов\",\n \"Афанасьев\",\n \"Власов\",\n \"Маслов\",\n \"Исаков\",\n \"Тихонов\",\n \"Аксенов\",\n \"Гаврилов\",\n \"Родионов\",\n \"Котов\",\n \"Горбунов\",\n \"Кудряшов\",\n \"Быков\",\n \"Зуев\",\n \"Третьяков\",\n \"Савельев\",\n \"Панов\",\n \"Рыбаков\",\n \"Суворов\",\n \"Абрамов\",\n \"Воронов\",\n \"Мухин\",\n \"Архипов\",\n \"Трофимов\",\n \"Мартынов\",\n \"Емельянов\",\n \"Горшков\",\n \"Чернов\",\n \"Овчинников\",\n \"Селезнев\",\n \"Панфилов\",\n \"Копылов\",\n \"Михеев\",\n \"Галкин\",\n \"Назаров\",\n \"Лобанов\",\n \"Лукин\",\n \"Беляков\",\n \"Потапов\",\n \"Некрасов\",\n \"Хохлов\",\n \"Жданов\",\n \"Наумов\",\n \"Шилов\",\n \"Воронцов\",\n \"Ермаков\",\n \"Дроздов\",\n \"Игнатьев\",\n \"Савин\",\n \"Логинов\",\n \"Сафонов\",\n \"Капустин\",\n \"Кириллов\",\n \"Моисеев\",\n \"Елисеев\",\n \"Кошелев\",\n \"Костин\",\n \"Горбачев\",\n \"Орехов\",\n \"Ефремов\",\n \"Исаев\",\n \"Евдокимов\",\n \"Калашников\",\n \"Кабанов\",\n \"Носков\",\n \"Юдин\",\n \"Кулагин\",\n \"Лапин\",\n \"Прохоров\",\n \"Нестеров\",\n \"Харитонов\",\n \"Агафонов\",\n \"Муравьев\",\n \"Ларионов\",\n \"Федосеев\",\n \"Зимин\",\n \"Пахомов\",\n \"Шубин\",\n \"Игнатов\",\n \"Филатов\",\n \"Крюков\",\n \"Рогов\",\n \"Кулаков\",\n \"Терентьев\",\n \"Молчанов\",\n \"Владимиров\",\n \"Артемьев\",\n \"Гурьев\",\n \"Зиновьев\",\n \"Гришин\",\n \"Кононов\",\n \"Дементьев\",\n \"Ситников\",\n \"Симонов\",\n \"Мишин\",\n \"Фадеев\",\n \"Комиссаров\",\n \"Мамонтов\",\n \"Носов\",\n \"Гуляев\",\n \"Шаров\",\n \"Устинов\",\n \"Вишняков\",\n \"Евсеев\",\n \"Лаврентьев\",\n \"Брагин\",\n \"Константинов\",\n \"Корнилов\",\n \"Авдеев\",\n \"Зыков\",\n \"Бирюков\",\n \"Шарапов\",\n \"Никонов\",\n \"Щукин\",\n \"Дьячков\",\n \"Одинцов\",\n \"Сазонов\",\n \"Якушев\",\n \"Красильников\",\n \"Гордеев\",\n \"Самойлов\",\n \"Князев\",\n \"Беспалов\",\n \"Уваров\",\n \"Шашков\",\n \"Бобылев\",\n \"Доронин\",\n \"Белозеров\",\n \"Рожков\",\n \"Самсонов\",\n \"Мясников\",\n \"Лихачев\",\n \"Буров\",\n \"Сысоев\",\n \"Фомичев\",\n \"Русаков\",\n \"Стрелков\",\n \"Гущин\",\n \"Тетерин\",\n \"Колобов\",\n \"Субботин\",\n \"Фокин\",\n \"Блохин\",\n \"Селиверстов\",\n \"Пестов\",\n \"Кондратьев\",\n \"Силин\",\n \"Меркушев\",\n \"Лыткин\",\n \"Туров\",\n )\n\n last_names_female = (\n \"Смирнова\",\n \"Иванова\",\n \"Кузнецова\",\n \"Попова\",\n \"Соколова\",\n \"Лебедева\",\n \"Козлова\",\n \"Новикова\",\n \"Морозова\",\n \"Петрова\",\n \"Волкова\",\n \"Соловьева\",\n \"Васильева\",\n \"Зайцева\",\n \"Павлова\",\n \"Семенова\",\n \"Голубева\",\n \"Виноградова\",\n \"Богданова\",\n \"Воробьева\",\n \"Федорова\",\n \"Михайлова\",\n \"Беляева\",\n \"Тарасова\",\n \"Белова\",\n \"Комарова\",\n \"Орлова\",\n \"Киселева\",\n \"Макарова\",\n \"Андреева\",\n \"Ковалева\",\n \"Ильина\",\n \"Гусева\",\n \"Титова\",\n \"Кузьмина\",\n \"Кудрявцева\",\n \"Баранова\",\n \"Куликова\",\n \"Алексеева\",\n \"Степанова\",\n \"Яковлева\",\n \"Сорокина\",\n \"Сергеева\",\n \"Романова\",\n \"Захарова\",\n \"Борисова\",\n \"Королева\",\n \"Герасимова\",\n \"Пономарева\",\n \"Григорьева\",\n \"Лазарева\",\n \"Медведева\",\n \"Ершова\",\n \"Никитина\",\n \"Соболева\",\n \"Рябова\",\n 
\"Полякова\",\n \"Цветкова\",\n \"Данилова\",\n \"Жукова\",\n \"Фролова\",\n \"Журавлева\",\n \"Николаева\",\n \"Крылова\",\n \"Максимова\",\n \"Сидорова\",\n \"Осипова\",\n \"Белоусова\",\n \"Федотова\",\n \"Дорофеева\",\n \"Егорова\",\n \"Матвеева\",\n \"Боброва\",\n \"Дмитриева\",\n \"Калинина\",\n \"Анисимова\",\n \"Петухова\",\n \"Антонова\",\n \"Тимофеева\",\n \"Никифорова\",\n \"Веселова\",\n \"Филиппова\",\n \"Маркова\",\n \"Большакова\",\n \"Суханова\",\n \"Миронова\",\n \"Ширяева\",\n \"Александрова\",\n \"Коновалова\",\n \"Шестакова\",\n \"Казакова\",\n \"Ефимова\",\n \"Денисова\",\n \"Громова\",\n \"Фомина\",\n \"Давыдова\",\n \"Мельникова\",\n \"Щербакова\",\n \"Блинова\",\n \"Колесникова\",\n \"Карпова\",\n \"Афанасьева\",\n \"Власова\",\n \"Маслова\",\n \"Исакова\",\n \"Тихонова\",\n \"Аксенова\",\n \"Гаврилова\",\n \"Родионова\",\n \"Котова\",\n \"Горбунова\",\n \"Кудряшова\",\n \"Быкова\",\n \"Зуева\",\n \"Третьякова\",\n \"Савельева\",\n \"Панова\",\n \"Рыбакова\",\n \"Суворова\",\n \"Абрамова\",\n \"Воронова\",\n \"Мухина\",\n \"Архипова\",\n \"Трофимова\",\n \"Мартынова\",\n \"Емельянова\",\n \"Горшкова\",\n \"Чернова\",\n \"Овчинникова\",\n \"Селезнева\",\n \"Панфилова\",\n \"Копылова\",\n \"Михеева\",\n \"Галкина\",\n \"Назарова\",\n \"Лобанова\",\n \"Лукина\",\n \"Белякова\",\n \"Потапова\",\n \"Некрасова\",\n \"Хохлова\",\n \"Жданова\",\n \"Наумова\",\n \"Шилова\",\n \"Воронцова\",\n \"Ермакова\",\n \"Дроздова\",\n \"Игнатьева\",\n \"Савина\",\n \"Логинова\",\n \"Сафонова\",\n \"Капустина\",\n \"Кириллова\",\n \"Моисеева\",\n \"Елисеева\",\n \"Кошелева\",\n \"Костина\",\n \"Горбачева\",\n \"Орехова\",\n \"Ефремова\",\n \"Исаева\",\n \"Евдокимова\",\n \"Калашникова\",\n \"Кабанова\",\n \"Носкова\",\n \"Юдина\",\n \"Кулагина\",\n \"Лапина\",\n \"Прохорова\",\n \"Нестерова\",\n \"Харитонова\",\n \"Агафонова\",\n \"Муравьева\",\n \"Ларионова\",\n \"Федосеева\",\n \"Зимина\",\n \"Пахомова\",\n \"Шубина\",\n \"Игнатова\",\n \"Филатова\",\n \"Крюкова\",\n \"Рогова\",\n \"Кулакова\",\n \"Терентьева\",\n \"Молчанова\",\n \"Владимирова\",\n \"Артемьева\",\n \"Гурьева\",\n \"Зиновьева\",\n \"Гришина\",\n \"Кононова\",\n \"Дементьева\",\n \"Ситникова\",\n \"Симонова\",\n \"Мишина\",\n \"Фадеева\",\n \"Комиссарова\",\n \"Мамонтова\",\n \"Носова\",\n \"Гуляева\",\n \"Шарова\",\n \"Устинова\",\n \"Вишнякова\",\n \"Евсеева\",\n \"Лаврентьева\",\n \"Брагина\",\n \"Константинова\",\n \"Корнилова\",\n \"Авдеева\",\n \"Зыкова\",\n \"Бирюкова\",\n \"Шарапова\",\n \"Никонова\",\n \"Щукина\",\n \"Дьячкова\",\n \"Одинцова\",\n \"Сазонова\",\n \"Якушева\",\n \"Красильникова\",\n \"Гордеева\",\n \"Самойлова\",\n \"Князева\",\n \"Беспалова\",\n \"Уварова\",\n \"Шашкова\",\n \"Бобылева\",\n \"Доронина\",\n \"Белозерова\",\n \"Рожкова\",\n \"Самсонова\",\n \"Мясникова\",\n \"Лихачева\",\n \"Бурова\",\n \"Сысоева\",\n \"Фомичева\",\n \"Русакова\",\n \"Стрелкова\",\n \"Гущина\",\n \"Тетерина\",\n \"Колобова\",\n \"Субботина\",\n \"Фокина\",\n \"Блохина\",\n \"Селиверстова\",\n \"Пестова\",\n \"Кондратьева\",\n \"Силина\",\n \"Меркушева\",\n \"Лыткина\",\n \"Турова\",\n )\n\n last_names = last_names_male + last_names_female\n\n middle_names_male = (\n \"Ааронович\",\n \"Абрамович\",\n \"Августович\",\n \"Авдеевич\",\n \"Аверьянович\",\n \"Адамович\",\n \"Адрианович\",\n \"Аксёнович\",\n \"Александрович\",\n \"Алексеевич\",\n \"Анатольевич\",\n \"Андреевич\",\n \"Анисимович\",\n \"Антипович\",\n \"Антонович\",\n \"Ануфриевич\",\n \"Арсенович\",\n \"Арсеньевич\",\n \"Артёмович\",\n \"Артемьевич\",\n 
\"Артурович\",\n \"Архипович\",\n \"Афанасьевич\",\n \"Бенедиктович\",\n \"Богданович\",\n \"Бориславович\",\n \"Бориславович\",\n \"Борисович\",\n \"Брониславович\",\n \"Валентинович\",\n \"Валерианович\",\n \"Валерьевич\",\n \"Валерьянович\",\n \"Васильевич\",\n \"Венедиктович\",\n \"Викентьевич\",\n \"Викторович\",\n \"Виленович\",\n \"Вилорович\",\n \"Витальевич\",\n \"Владиленович\",\n \"Владиславович\",\n \"Владленович\",\n \"Власович\",\n \"Всеволодович\",\n \"Вячеславович\",\n \"Гавриилович\",\n \"Гаврилович\",\n \"Геннадиевич\",\n \"Георгиевич\",\n \"Герасимович\",\n \"Германович\",\n \"Гертрудович\",\n \"Глебович\",\n \"Гордеевич\",\n \"Григорьевич\",\n \"Гурьевич\",\n \"Давидович\",\n \"Давыдович\",\n \"Даниилович\",\n \"Данилович\",\n \"Демидович\",\n \"Демьянович\",\n \"Денисович\",\n \"Димитриевич\",\n \"Дмитриевич\",\n \"Дорофеевич\",\n \"Евсеевич\",\n \"Евстигнеевич\",\n \"Егорович\",\n \"Елизарович\",\n \"Елисеевич\",\n \"Еремеевич\",\n \"Ермилович\",\n \"Ермолаевич\",\n \"Ерофеевич\",\n \"Ефимович\",\n \"Ефимьевич\",\n \"Ефремович\",\n \"Ефстафьевич\",\n \"Жанович\",\n \"Жоресович\",\n \"Захарьевич\",\n \"Зиновьевич\",\n \"Игнатович\",\n \"Игнатьевич\",\n \"Игоревич\",\n \"Измаилович\",\n \"Изотович\",\n \"Иларионович\",\n \"Ильич\",\n \"Ильясович\",\n \"Иосипович\",\n \"Иосифович\",\n \"Исидорович\",\n \"Марсович\",\n \"Матвеевич\",\n \"Тарасович\",\n \"Теймуразович\",\n \"Терентьевич\",\n \"Тимурович\",\n \"Тихонович\",\n \"Трифонович\",\n \"Трофимович\",\n \"Устинович\",\n \"Фадеевич\",\n \"Фёдорович\",\n \"Федосеевич\",\n \"Федосьевич\",\n \"Федотович\",\n \"Феликсович\",\n \"Феодосьевич\",\n \"Феоктистович\",\n \"Феофанович\",\n \"Филатович\",\n \"Филимонович\",\n \"Филиппович\",\n \"Фокич\",\n \"Фомич\",\n \"Фролович\",\n \"Харитонович\",\n \"Харламович\",\n \"Харлампович\",\n \"Харлампьевич\",\n \"Чеславович\",\n \"Эдгардович\",\n \"Эдгарович\",\n \"Эдуардович\",\n \"Юлианович\",\n \"Юльевич\",\n \"Яковлевич\",\n \"Якубович\",\n \"Ярославович\",\n )\n\n middle_names_female = (\n \"Александровна\",\n \"Андреевна\",\n \"Архиповна\",\n \"Алексеевна\",\n \"Антоновна\",\n \"Аскольдовна\",\n \"Альбертовна\",\n \"Аркадьевна\",\n \"Афанасьевна\",\n \"Анатольевна\",\n \"Артемовна\",\n \"Богдановна\",\n \"Болеславовна\",\n \"Борисовна\",\n \"Вадимовна\",\n \"Васильевна\",\n \"Владимировна\",\n \"Валентиновна\",\n \"Вениаминовна\",\n \"Владиславовна\",\n \"Валериевна\",\n \"Викторовна\",\n \"Вячеславовна\",\n \"Геннадиевна\",\n \"Георгиевна\",\n \"Геннадьевна\",\n \"Григорьевна\",\n \"Даниловна\",\n \"Дмитриевна\",\n \"Евгеньевна\",\n \"Егоровна\",\n \"Ефимовна\",\n \"Ждановна\",\n \"Захаровна\",\n \"Ивановна\",\n \"Игоревна\",\n \"Ильинична\",\n \"Кирилловна\",\n \"Кузьминична\",\n \"Константиновна\",\n \"Кузьминична\",\n \"Леонидовна\",\n \"Леоновна\",\n \"Львовна\",\n \"Макаровна\",\n \"Матвеевна\",\n \"Михайловна\",\n \"Максимовна\",\n \"Мироновна\",\n \"Натановна\",\n \"Никифоровна\",\n \"Ниловна\",\n \"Наумовна\",\n \"Николаевна\",\n \"Олеговна\",\n \"Оскаровна\",\n \"Павловна\",\n \"Петровна\",\n \"Робертовна\",\n \"Рубеновна\",\n \"Руслановна\",\n \"Романовна\",\n \"Рудольфовна\",\n \"Святославовна\",\n \"Сергеевна\",\n \"Степановна\",\n \"Семеновна\",\n \"Станиславовна\",\n \"Тарасовна\",\n \"Тимофеевна\",\n \"Тимуровна\",\n \"Федоровна\",\n \"Феликсовна\",\n \"Филипповна\",\n \"Харитоновна\",\n \"Эдуардовна\",\n \"Эльдаровна\",\n \"Юльевна\",\n \"Юрьевна\",\n \"Яковлевна\",\n )\n\n middle_names = middle_names_male + middle_names_female\n\n language_names = (\n 
\"Афарский\",\n \"Абхазский\",\n \"Авестийский\",\n \"Африкаанс\",\n \"Акан\",\n \"Амхарский\",\n \"Арагонский\",\n \"Арабский\",\n \"Ассамский\",\n \"Аварский\",\n \"Аймарский\",\n \"Азербайджанский\",\n \"Башкирский\",\n \"Белорусский\",\n \"Болгарский\",\n \"Бислама\",\n \"Бенгальский\",\n \"Тибетский\",\n \"Бретонский\",\n \"Боснийский\",\n \"Каталанский\",\n \"Чеченский\",\n \"Чаморро\",\n \"Корсиканский\",\n \"Кри\",\n \"Чешский\",\n \"Чувашский\",\n \"Валлийский\",\n \"Датский\",\n \"Немецкий\",\n \"Греческий\",\n \"Английский\",\n \"Эсперанто\",\n \"Испанский\",\n \"Эстонский\",\n \"Персидский\",\n \"Финский\",\n \"Фиджийский\",\n \"Фарси\",\n \"Французский\",\n \"Ирландский\",\n \"Гэльский\",\n \"Галийский\",\n \"Иврит\",\n \"Хинди\",\n \"Хорватский\",\n \"Гавайский\",\n \"Болгарский\",\n \"Армянский\",\n \"Индонезийский\",\n \"Исландский\",\n \"Итальянский\",\n \"Японский\",\n \"Яванский\",\n \"Грузинский\",\n \"Казахский\",\n \"Корейский\",\n \"Кашмири\",\n \"Курдский\",\n \"Коми\",\n \"Киргизский\",\n \"Латинский\",\n \"Люксембургский\",\n \"Лимбургский\",\n \"Лингала\",\n \"Лаосский\",\n \"Литовский\",\n \"Латвийский\",\n \"Малагасийский\",\n \"Маршалльский\",\n \"Маори\",\n \"Македонский\",\n \"Малаялам\",\n \"Монгольский\",\n \"Маратхи\",\n \"Малайский\",\n \"Мальтийский\",\n \"Непальский\",\n \"Нидерландский\",\n \"Норвежский\",\n \"Навахо\",\n \"Оромо\",\n \"Ория\",\n \"Осетинский\",\n \"Пали\",\n \"Польский\",\n \"Пуштунский\",\n \"Португальский\",\n \"Романшский\",\n \"Румынский\",\n \"Русский\",\n \"Киньяруанда\",\n \"Санскрит\",\n \"Сардинский\",\n \"Санго\",\n \"Сингальский\",\n \"Словацкий\",\n \"Словенский\",\n \"Самоанский\",\n \"Сомалийский\",\n \"Албанский\",\n \"Сербский\",\n \"Сунданский\",\n \"Шведский\",\n \"Суахили\",\n \"Тамильский\",\n \"Телугу\",\n \"Таджикский\",\n \"Тайский\",\n \"Тигринья\",\n \"Туркменский\",\n \"Тагальский\",\n \"Тсвана\",\n \"Тонга\",\n \"Турецкий\",\n \"Тсонга\",\n \"Татарский\",\n \"Таитянский\",\n \"Уйгурский\",\n \"Украинский\",\n \"Урду\",\n \"Узбекский\",\n \"Венда\",\n \"Вьетнамский\",\n \"Идиш\",\n \"Йоруба\",\n \"Китайский\",\n \"Зулу\",\n )\n\n prefixes_male: Sequence[str] = (\"г-н\", \"тов.\")\n\n prefixes_female: Sequence[str] = (\"г-жа\", \"тов.\")\n\n def middle_name(self) -> str:\n return self.random_element(self.middle_names)\n\n def middle_name_male(self) -> str:\n return self.random_element(self.middle_names_male)\n\n def middle_name_female(self) -> str:\n return self.random_element(self.middle_names_female)\n","repo_name":"joke2k/faker","sub_path":"faker/providers/person/ru_RU/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":37558,"program_lang":"python","lang":"kv","doc_type":"code","stars":16539,"dataset":"github-code","pt":"16"} +{"seq_id":"22389728205","text":"from __future__ import annotations\n\nfrom itertools import islice, zip_longest\n\nimport numpy as np\n\nfrom assignment2.assignment2 import nparray_tail, sliding_window, window2\n\n\nclass TestAssignment2:\n def test_window(self: TestAssignment2, resources):\n sony_training_labels = np.load(\n str(resources.joinpath(\"sony_training_labels.npy\")), allow_pickle=True\n )\n\n test_list = nparray_tail(sony_training_labels, 41).tolist()\n\n testagain = list(window2(test_list))\n print(testagain)\n\n test2 = [list(x) for x in sliding_window(test_list)]\n print(test2)\n\n def test_sublist_freq(self, resources):\n sony_training_labels = np.load(\n str(resources.joinpath(\"sony_training_labels.npy\")), allow_pickle=True\n )\n\n 
print(sony_training_labels)\n # initializing list\n test_list = [4, 5, 3, 5, 7, 8, 3, 5, 7, 2, 7, 3, 2]\n test_list = sony_training_labels.tolist()\n\n # printing original list\n print(\"The original list is : \" + str(test_list))\n\n # initializing Sublist\n sublist = [\"-\", \"-\", \"-\"]\n\n # slicing is used to extract chunks and compare\n res = len(\n [\n sublist\n for idx in range(len(test_list))\n if test_list[idx : idx + len(sublist)] == sublist\n ]\n )\n\n # printing result\n print(\"The sublist count : \" + str(res))\n\n def test2(self, resources):\n sony_training_labels = np.load(\n str(resources.joinpath(\"sony_training_labels.npy\")), allow_pickle=True\n )\n\n print(sony_training_labels)\n # initializing list\n test_list = [4, 5, 3, 5, 7, 8, 3, 5, 7, 2, 7, 3, 2]\n\n t = str(type(sony_training_labels))\n print(t)\n test_list = sony_training_labels.tolist()\n\n # printing original list\n print(\"The original list is : \" + str(test_list))\n\n # initializing Sublist\n sublist = [\"-\", \"-\", \"-\"]\n\n # slicing is used to extract chunks and compare\n res = []\n idx = 0\n while True:\n try:\n # getting to the index\n idx = test_list.index(sublist[0], idx)\n except ValueError:\n break\n\n # using all() to check for all elements equivalence\n if all(\n x == y\n for (x, y) in zip_longest(\n sublist, islice(test_list, idx, idx + len(sublist))\n )\n ):\n res.append(sublist)\n idx += len(sublist)\n idx += 1\n\n res = len(res)\n\n # printing result\n print(\"The sublist count : \" + str(res))\n","repo_name":"szmyty/misc","sub_path":"assignments/assignment2/tests/test_assignment2.py","file_name":"test_assignment2.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1369661923","text":"class Solution:\r\n # @param s: A string s\r\n # @param dict: A dictionary of words dict\r\n def wordBreak(self, s, dict):\r\n n = len(s)\r\n dp = [True] + [False] * n\r\n len_max = max(map(len,dict)) if dict else 0\r\n for i in range(1, n+1):\r\n for j in range(i-1, max(0, i-len_max)-1, -1):\r\n if dp[j] and s[j:i] in dict:\r\n dp[i] = True\r\n break\r\n \r\n return dp[n]","repo_name":"WeikangChen/algorithm","sub_path":"lintcode/107_word-break/word-break.py","file_name":"word-break.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28487834374","text":"from socket import *\nimport threading\n\nclass ChessClientListenerThread(threading.Thread):\n\n\tdef __init__(self, client_listener_socket, bufsize):\n\t\tsuper(ChessClientListenerThread, self).__init__()\n\t\tself.client_listener_socket = client_listener_socket\n\t\tself.is_new_message = False\n\t\tself.bufsize = bufsize\n\t\tself.message = ''\n\n\tdef get_message(self):\n\t\treturn self.message\n\n\tdef start(self):\n\t\twhile self.message != 'QUIT':\n\t\t\tself.message = self.client_listener_socket.recv(self.bufsize)\n\t\t\tself.is_new_message = True\n\t\t\n\t\"\"\"\n\tdef connect(self):\n\t\tif not self.is_connected:\n\t\t\tself.client_socket = socket(AF_INET, SOCK_STREAM)\n\t\t\tself.client_socket.connect(self.addr)\n\t\t\tself.is_connected = True\n\t\t\tprint \"Connection successful.\"\n\t\t\treturn True\n\t\treturn False\n\n\tdef receive_data(self):\n\t\tself.is_polling = True\n\t\tself.return_message = self.client_socket.recv(self.bufsize)\n\t\tself.is_polling = False\n\n\tdef send_data(self, 
message):\n\t\tself.client_socket.send(message)\n\t\treturn True\n\n\t# def start(self):\n\t# \t# Wait for the server to tell us that both players are ready.\n\t# \tclient_socket = socket(AF_INET, SOCK_STREAM)\n\t# \tclient_socket.connect(self.addr)\n\t# \tbufsize = self.bufsize\n\n\t# \tif client_socket.recv(bufsize) == 'ready':\n\t# \t\t# Ask for the color to play as, which will determine the order.\n\t# \t\tcolor = raw_input('Color: ')\n\t# \t\tclient_socket.send(color)\n\n\t# \t\twhile True:\n\t# \t\t\tif color == 'white':\n\t# \t\t\t\tmove = raw_input('')\n\t# \t\t\t\tclient_socket.send(move)\n\t# \t\t\t\tprint client_socket.recv(bufsize)\n\t# \t\t\telse:\n\t# \t\t\t\tprint client_socket.recv(bufsize)\n\t# \t\t\t\tmove = raw_input('')\n\t# \t\t\t\tclient_socket.send(move)\n\n\t# \telse:\n\t# \t\tprint \"[ChessClient]: Something went horribly wrong. D:\"\n\t\"\"\"\nif __name__ == '__main__':\n\tChessClient().start()","repo_name":"kieferyap/chess-with-benefits","sub_path":"chess_client_thread.py","file_name":"chess_client_thread.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32219515452","text":"# Imports\nimport discord\nfrom discord.ext import commands\n\n\n# Defining what prefix we want\nbot = commands.Bot(command_prefix=\"~\")\n\n# This will show the bot's name in the console\n@bot.event\nasync def on_ready():\n print(\"I have logged in as {0.user}\".format(bot))\n\n# Commands\n@bot.command()\nasync def ping(ctx):\n await ctx.send('Pong! {0}'.format(round(bot.latency, 1)))\n\n\n# Enter here the token which you have copied before\nbot.run(\"Your Token\")\n","repo_name":"dev-schueppchen/updated-tutorials","sub_path":"Discord.py/Episode-1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"23453209043","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n# from heapq import heappush, heappop\n#\n# N = int(input().strip())\n#\n# heap = []\n# delset = set([])\n#\n# for _ in range(N):\n# q = [map(int, input().strip().split(\" \"))]\n#\n# if q[0] == 1:\n# heappush(heap, q[1])\n# delset.discard(q[1])\n# elif q[0] == 2:\n# delset.add(q[1])\n# elif q[0] == 3:\n# while True:\n# item = heap[0]\n# if item not in delset:\n# break\n# delset.discard(heappop(heap))\n# print(item)\nimport sys\n\narr = [6, -992, 99999, 4, 1, -99]\n\n\ndef max_min(arr):\n mins = 100\n maxs = -900\n for i in range(len(arr)):\n mins = arr[i] ^ ((mins ^ arr[i]) & -(mins < arr[i]))\n maxs = maxs ^ ((maxs ^ arr[i]) & -(maxs < arr[i]))\n print(mins)\n print(maxs)\n\n\nmax_min(arr)\n\n\ndef adding(x, y):\n while y != 0:\n carry = x & y\n x = x ^ y\n y = carry << 1\n print(x)\n\n\nadding(5, 6)\n\n\ndef substracting(x, y):\n while y != 0:\n carry = ~x & y\n x = x ^ y\n y = carry << 1\n print(x)\n\n\nsubstracting(5, 2)\n\n\ndef multiplication(x, y):\n pass\n\n\nmultiplication(3, 5)\n\n\n# x = 6\n# y = 9\n# r = y ^ ((x ^ y) & -(x < y)) # min\n# print(r)\n# r = x ^ ((x ^ y) & -(x < y)) # max\n# print(r)\n\n\ndef isPowerOf2(v):\n f = False\n f = (v and not (v & (v - 1)))\n print(f)\n\n\ndef isOfOppositeSign(x, y):\n f = False\n f = (x ^ y) < 0\n print(f)\n\n\nisPowerOf2(31)\nisOfOppositeSign(9, 3)\n\n\ndef absoluteValue(n):\n char_bit = 8\n size_int = sys.getsizeof(int())\n print(size_int)\n mask = n >> (size_int * char_bit - 1)\n print((n + mask) ^ mask)\n # return (n ^ mask) - mask # we can use this also\n\n\nabsoluteValue(-99)\ns1 = \"abcbce\"\ns2 = \"bce\"\n\n\ndef process(s1, s2):\n l1 = len(s1)\n l2 = len(s2)\n i = 0\n while i < l1:\n j = 0\n if s1[i] == s2[j]:\n j += 1\n while j < l2 and s1[i + j] == s2[j]:\n j += 1\n if j == l2:\n return True\n i += 1\n return False\n\n\nprint(process(s1, s2))\n","repo_name":"1576dkm/Spark_Jobs","sub_path":"wrong12/wrong12.py","file_name":"wrong12.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35363197303","text":"from wxPython import wx\n\nclass MyApp(wx.wxApp):\n\n def OnInit(self):\n frame = wx.wxFrame(None, -1, \"font bug\", size=(200, 200))\n panel = wx.wxPanel(frame, -1)\n self.panel = panel\n \n frame.Show(1)\n\n\n self.btn = wx.wxButton(panel, -1, \"Button\")\n\n borderStyle = 0 #wx.wxNO_BORDER\n tstyle = wx.wxTE_RICH2 | wx.wxTE_PROCESS_TAB | wx.wxTE_MULTILINE | borderStyle | \\\n wx.wxNO_FULL_REPAINT_ON_RESIZE | wx.wxCLIP_SIBLINGS\n\n self.text = wx.wxTextCtrl(panel, -1, 'Hello World', (0, 50), style=tstyle)\n \n font = wx.wxFont(18, wx.wxSWISS, wx.wxNORMAL, wx.wxNORMAL)\n self.btn.SetFont(font)\n self.text.SetFont(font)\n\n self.SetTopWindow(frame)\n\n return 1\n\n\napp = MyApp(0)\napp.MainLoop()\n\n\n\n \t \t \n","repo_name":"wxWidgets/trac-attachments","sub_path":"ticket/833/8334918da76533b5c14f235f374e327c95035aa3/a393e6fa762b9f855063747c066710b77277c5ab.py","file_name":"a393e6fa762b9f855063747c066710b77277c5ab.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"31239505311","text":"import numpy as np\nimport lms_code.plots.plot_all as lms_plot\nfrom lms_code.plots.plot_comparison import plot_lms_gps\nimport lms_code.lib.rep2 as rep2\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport datetime\n\njune15 = datetime.datetime(year = 2008, month = 6, day = 
15)\ndef get_early(qi_gps, qi_gps_all, before = june15, inv = False):\n dist = []\n vel = []\n sig = []\n for i, gps_idx in enumerate(qi_gps['selected']):\n if inv is False:\n if qi_gps_all['resurvey'][gps_idx] > before:\n continue\n else:\n if qi_gps_all['resurvey'][gps_idx] < before:\n continue\n\n dist.append(qi_gps['dist'][i] / 1000.0)\n vel.append(qi_gps['vel'][i])\n sig.append(qi_gps['sig'][i])\n return dist, vel, sig\n\ndef main():\n qi_gps_all = rep2.load('qi_gps_imported')\n qi_gps = rep2.load('qi_gps_near')\n plot_params = lms_plot.params()\n plot_params['rigid_offset'] = 0.0\n\n lms_plot.setup()\n fig, axarr = plt.subplots(1, sharex = True)\n axarr = [axarr]\n # divider = make_axes_locatable(axarr[0])\n # hidden_ax = divider.append_axes(\"right\", plot_params['cbar_width'], pad=\"3%\")\n # hidden_ax.set_visible(False)\n\n\n def plot(inv, ax, label):\n dist, vel, sig = get_early(qi_gps, qi_gps_all, inv = inv)\n color = '#FF0000'\n if inv:\n color = '#0000FF'\n ax.plot(np.array(dist) - plot_params['view_left'],\n np.array(vel) + plot_params['rigid_offset'],\n 'o', color = color, label = label,\n zorder = 1000)\n\n plot(False, axarr[0], 'Early')\n plot(True, axarr[0], 'Late')\n axarr[0].set_ylabel('$v_{\\\\textrm{both}}$ (cm)')\n # axarr[1].set_ylabel('$v_{\\\\textrm{late}}$ (cm)')\n axarr[0].set_xlabel('$x$ (km)')\n axarr[0].set_xlim([0, 550])\n # axarr[1].set_xlim([0, 550])\n zoomed = False\n if zoomed:\n axarr[0].set_ylim([-20, 20])\n axarr[1].set_ylim([-20, 20])\n plot_params['fig_scale'][1] /= 2.0\n plt.legend()\n fig.set_size_inches(plot_params['fig_scale'])\n filename = 'qi_gps_xsec.pdf'\n if zoomed:\n filename = 'qi_gps_xsec_zoomed.pdf'\n fig.savefig(filename, bbox_inches = 'tight')\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tbenthompson/LMS_public","sub_path":"lms_code/plots/plot_qi_xsec.py","file_name":"plot_qi_xsec.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3085820054","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport requests\nfrom io import StringIO\n\nimport xlsxwriter\nimport ApiKeys\n\nfrom Instruments import Instruments\nfrom RedcapApiHandler import RedcapApiHandler\n\nclass DataAnalyser:\n def __init__(self, resource_dir, data, site):\n self.data = data\n self.instruments = Instruments(data)\n self.resource_dir = resource_dir\n self.site = site\n\n def plot_histogram(self, data, col, outliers):\n q1 = data.quantile(0.25)\n q3 = data.quantile(0.75)\n median = np.round(data.median(),1)\n iqr = q3 - q1\n\n std = np.round(data.std(),1)\n mean = np.round(data.mean(),1)\n\n upper_limit_iqr = q3 + 1.5 * iqr\n lower_limit_iqr = q1 - 1.5 * iqr\n\n upper_limit_std = mean + std * 3\n lower_limit_std = mean - std * 3\n\n upper_bound = max(upper_limit_iqr, upper_limit_std)\n lower_bound = min(lower_limit_iqr, lower_limit_std)\n\n plt.figure(figsize=(16*0.9, 9*0.9), dpi=200)\n plt.title(col)\n\n ax = data.plot.hist(bins=50)\n\n # Plot and label the median\n plt.axvline(median, color='k', linestyle='dashed', linewidth=1)\n plt.text(median*1.0001, ax.dataLim.ymax*0.9995, str('Median: ' + str(median)), fontsize=9)\n\n # Don't plot the lower boundary if it is below 0 and there are no negative values\n if (lower_bound >= 0) or (data[data < 0].size > 0):\n plt.axvline(lower_bound, color='k', linestyle='dashed', linewidth=1)\n\n plt.axvline(upper_bound, color='k', linestyle='dashed', linewidth=1)\n\n # Sort 
the outliers by value\n outliers = outliers.to_frame().sort_values(by=col)\n\n # Get the histogram bins and the number of values in each bin\n bar_info = pd.cut(data, 50)\n num_vals = bar_info.value_counts().sort_index()\n\n # This loop is used to write the outlier IDs above the histogram bins\n id_text_height = 0\n last_bin_idx = -1\n for idx, num in outliers.iterrows():\n current_bin_idx = num_vals.index.get_loc(bar_info[idx])\n\n # Get the number of values of the bin of the current outlier ID\n num_bin_vals = num_vals.values[current_bin_idx]\n\n # Reset id_text_height if this is a new bin and update last_bin_idx\n if current_bin_idx != last_bin_idx:\n last_bin_idx = current_bin_idx\n id_text_height = 0\n\n # If there are more than 30 values in the bin, don't write the IDs\n if num_bin_vals > 30:\n continue\n\n # Write the ID above the bin, incremented by id_text_height,\n # and increment id_text_height by id_step\n plt.text(bar_info[idx].mid, num_bin_vals + id_text_height, idx, fontsize=7, rotation=40)\n\n id_step = np.round((ax.dataLim.ymax - num_bin_vals) / 30 , 2)\n id_text_height += id_step\n\n fig_title = self.resource_dir + col + '_hist.png'\n\n plt.savefig(fig_title, bbox_inches='tight')\n plt.close()\n\n return fig_title\n #plt.show()\n\n def instrument_outliers(self, instrument_data, data_frame, instrument_key):\n for col in instrument_data.columns:\n\n if col in self.ignored_cols:\n continue\n\n data = instrument_data[col]\n\n # Skip iteration if all data is NaN\n if data.dropna().size == 0:\n continue\n\n data = data.dropna()\n\n # Remove -999 (missing data)\n data = data[data != -999]\n\n q1 = data.quantile(0.25)\n q3 = data.quantile(0.75)\n mean = np.round(data.mean(),1)\n std = np.round(data.std(),1)\n median = np.round(data.median(),1)\n iqr = q3 - q1\n\n # Skip iteration if IQR = 0\n if iqr == 0:\n continue\n\n upper_limit_iqr = q3 + 1.5 * iqr\n lower_limit_iqr = q1 - 1.5 * iqr\n\n upper_limit_std = mean + std * 3\n lower_limit_std = mean - std * 3\n\n upper_limit = max(upper_limit_iqr, upper_limit_std)\n lower_limit = min(lower_limit_iqr, lower_limit_std)\n\n # Find outliers i.e. 
values outside the range (q1 - 1.5 * iqr, q3 + 1.5 * iqr)\n mask = data.between(lower_limit, upper_limit, inclusive='both')\n outliers = data[~mask].dropna()\n\n # Skip iteration if there are no outliers\n if outliers.size == 0:\n continue\n\n # self.plot_histogram(data, col, outliers)\n\n outliers = outliers.to_frame()\n\n outliers.rename(columns={col:'Value'}, inplace=True)\n outliers['Data Field'] = col\n outliers['Instrument'] = instrument_key\n # outliers['Median'] = median\n outliers['Lower Limit'] = lower_limit\n outliers['Upper Limit'] = upper_limit\n\n # outliers['Limit'] = np.where( ( outliers['Value'] >= upper_limit ), upper_limit, lower_limit )\n # outliers['Comment'] = ''\n\n data_frame = data_frame.append(outliers)\n\n return data_frame\n\n def write_outliers_report(self, outliers_xlsx_writer):\n exceptions = RedcapApiHandler(self.site).get_exceptions_from_redcap()\n\n df = pd.DataFrame()\n\n for instrument_key, instrument_getter in self.instruments.instrument_dict.items():\n if instrument_key == 'ethnolinguistic_information':\n continue\n instrument_data = instrument_getter(self.instruments)\n instrument_data.set_index(['study_id'], inplace=True)\n instrument_data = instrument_data.select_dtypes(include=np.number)\n df = self.instrument_outliers(instrument_data, df, instrument_key)\n\n # Remove exceptions from outliers frame\n df = pd.merge(df, exceptions, on=['study_id','Data Field'], how='outer', indicator='source')\n df = df[df['source'] == 'left_only'].drop('source', axis=1)\n\n df['Is Correct'] = ''\n df['Comment/Updated Value'] = ''\n df = df[['Data Field', 'Instrument', 'Value', 'Lower Limit', 'Upper Limit', 'Is Correct', 'Comment/Updated Value']]\n df = df.sort_values(by=['study_id', 'Instrument'])\n df.reset_index(inplace=True)\n df.to_excel(outliers_xlsx_writer, sheet_name='Outliers', startcol=0, startrow=3, index=False)\n\n lower_limit_text = 'Lower Limit = min(mean - std * 3, 1st quartile - 1.5 * IQR)'\n upper_limit_text = 'Upper Limit = max(mean + std * 3, 3rd quartile + 1.5 * IQR)'\n\n outliers_xlsx_writer.sheets['Outliers'].write(0, 0, lower_limit_text)\n outliers_xlsx_writer.sheets['Outliers'].write(1, 0, upper_limit_text)\n outliers_xlsx_writer.sheets['Outliers'].set_column(0, 0 , 15)\n outliers_xlsx_writer.sheets['Outliers'].set_column(1, 1 , 30)\n outliers_xlsx_writer.sheets['Outliers'].set_column(2, 2 , 30)\n outliers_xlsx_writer.sheets['Outliers'].set_column(3, 3 , 10)\n outliers_xlsx_writer.sheets['Outliers'].set_column(4, 4 , 12)\n outliers_xlsx_writer.sheets['Outliers'].set_column(5, 5 , 12)\n outliers_xlsx_writer.sheets['Outliers'].set_column(6, 6 , 20)\n outliers_xlsx_writer.sheets['Outliers'].set_column(7, 7 , 30)\n\n # Skip all dropdowns/checkboxes/radio buttons\n ignored_cols = ['bloc_hours_last_drink',\n 'bloc_last_ate_hrs',\n 'demo_home_language',\n 'home_language_confirmation',\n 'phase_1_site_id_1',\n 'phase_1_gender',\n 'phase_1_home_language',\n 'phase_1_ethnicity',\n 'gene_site',\n 'demo_gender_correction',\n 'home_language',\n 'ethnicity_confirmation',\n 'ethnicity',\n 'ethn_father_ethn_sa',\n 'ethn_father_lang_sa',\n 'ethn_pat_gfather_ethn_sa',\n 'ethn_pat_gfather_lang_sa',\n 'ethn_pat_gmother_ethn_sa',\n 'ethn_pat_gmother_lang_sa',\n 'ethn_mother_ethn_sa',\n 'ethn_mother_lang_sa',\n 'ethn_mat_gfather_ethn_sa',\n 'ethn_mat_gfather_lang_sa',\n 'ethn_mat_gmother_ethn_sa',\n 'ethn_mat_gmother_lang_sa',\n 'famc_siblings',\n 'famc_children',\n 'preg_pregnant',\n 'preg_birth_control',\n 'preg_hysterectomy',\n 'preg_regular_periods',\n 
'preg_last_period_remember',\n 'preg_period_more_than_yr',\n 'mari_marital_status',\n 'educ_highest_level',\n 'empl_status',\n 'empl_days_work',\n 'cogn_read_sentence',\n 'cogn_memory',\n 'cogn_difficulty_remember',\n 'cogn_difficulty_concern',\n 'cogn_learning_new_task',\n 'cogn_words_remember_p1',\n 'cogn_year',\n 'cogn_what_is_the_month',\n 'cogn_day_of_the_month',\n 'cogn_country_of_residence',\n 'cogn_district_province',\n 'cogn_village_town_city',\n 'cogn_weekdays_forward',\n 'cogn_weekdays_backwards',\n 'frai_use_hands',\n 'frai_sit_stands_completed',\n 'frai_non_dominant_hand',\n 'frai_complete_procedure',\n 'frai_need_support',\n 'frai_procedure_walk_comp',\n 'cogn_delayed_recall',\n 'cogn_word_cognition_list',\n 'hous_electricity',\n 'hous_solar_energy',\n 'hous_power_generator',\n 'hous_alter_power_src',\n 'hous_television',\n 'hous_radio',\n 'hous_motor_vehicle',\n 'hous_motorcycle',\n 'hous_bicycle',\n 'hous_refrigerator',\n 'hous_washing_machine',\n 'hous_sewing_machine',\n 'hous_telephone',\n 'hous_mobile_phone',\n 'hous_microwave',\n 'hous_dvd_player',\n 'hous_satellite_tv_or_dstv',\n 'hous_computer_or_laptop',\n 'hous_internet_by_computer',\n 'hous_internet_by_m_phone',\n 'hous_electric_iron',\n 'hous_fan',\n 'hous_electric_gas_stove',\n 'hous_kerosene_stove',\n 'hous_plate_gas',\n 'hous_electric_plate',\n 'hous_torch',\n 'hous_gas_lamp',\n 'hous_kerosene_lamp',\n 'hous_toilet_facilities',\n 'hous_portable_water',\n 'hous_grinding_mill',\n 'hous_table',\n 'hous_sofa',\n 'hous_wall_clock',\n 'hous_bed',\n 'hous_mattress',\n 'hous_blankets',\n 'hous_cattle',\n 'hous_other_livestock',\n 'hous_poultry',\n 'hous_tractor',\n 'hous_plough',\n 'subs_tobacco_use',\n 'subs_smoke_100',\n 'subs_smoke_now',\n 'subs_smoke_last_hour',\n 'subs_smoke_cigarettes',\n 'subs_smoking_frequency',\n 'subs_smoke_per_day',\n 'subs_smokeless_tobacc_use',\n 'subs_snuff_use',\n 'subs_snuff_method_use',\n 'subs_snuff_use_freq',\n 'subs_tobacco_chew_use',\n 'subs_freq_snuff_use',\n 'subs_tobacco_chew_freq',\n 'subs_tobacco_chew_d_freq',\n 'subs_alcohol_consump',\n 'subs_alcohol_consume_now',\n 'subs_alcohol_consump_freq',\n 'subs_alcohol_cutdown',\n 'subs_alcohol_criticize',\n 'subs_alcohol_guilty',\n 'subs_alcohol_hangover',\n 'subs_alcohol_con_past_yr',\n 'subs_alcoholtype_consumed',\n 'subs_drugs_use',\n 'subs_drug_use_other',\n 'genh_breast_cancer',\n 'genh_breast_cancer_treat',\n 'genh_bre_cancer_treat_now',\n 'genh_bre_cancer_trad_med',\n 'genh_cervical_cancer',\n 'genh_cer_cancer_treat',\n 'genh_cer_cancer_treat_now',\n 'genh_cer_cancer_trad_med',\n 'genh_prostate_cancer',\n 'genh_pro_cancer_treat',\n 'genh_pro_cancer_treat_now',\n 'genh_pro_cancer_trad_med',\n 'genh_oesophageal_cancer',\n 'genh_oes_cancer_treat',\n 'genh_oes_cancer_treat_now',\n 'genh_oesophageal_trad_med',\n 'genh_other_cancers',\n 'genh_other_cancer_treat',\n 'genh_oth_cancer_treat_now',\n 'genh_oth_cancer_trad_med',\n 'genh_obesity_mom',\n 'genh_h_blood_pressure_mom',\n 'genh_h_cholesterol_mom',\n 'genh_breast_cancer_mom',\n 'genh_cervical_cancer_mom',\n 'genh_oes_cancer_mom',\n 'genh_cancer_other_mom',\n 'genh_asthma_mom',\n 'genh_obesity_dad',\n 'genh_h_blood_pressure_dad',\n 'genh_h_cholesterol_dad',\n 'genh_prostate_cancer_dad',\n 'genh_other_cancers_dad',\n 'genh_asthma_dad',\n 'genh_starchy_staple_food',\n 'genh_change_diet',\n 'genh_lose_weight',\n 'genh_pesticide',\n 'genh_pesticide_region',\n 'genh_pesticide_type',\n 'genh_cooking_place',\n 'genh_cooking_done_inside',\n 'genh_energy_source_type',\n 
'genh_smoker_in_your_house',\n 'genh_smoke_freq_someone',\n 'genh_insect_repellent_use',\n 'infh_malaria',\n 'infh_malaria_month',\n 'infh_malaria_area',\n 'infh_tb',\n 'infh_tb_12months',\n 'infh_tb_treatment',\n 'infh_tb_meds',\n 'infh_tb_counselling',\n 'infh_tb_traditional_med',\n 'infh_hiv_tested',\n 'infh_hiv_status',\n 'infh_hiv_positive',\n 'infh_hiv_medication',\n 'infh_hiv_arv_meds_now',\n 'infh_hiv_arv_single_pill',\n 'infh_hiv_pill_size',\n 'infh_hiv_traditional_meds',\n 'infh_painful_feet_hands',\n 'infh_hypersensitivity',\n 'infh_kidney_problems',\n 'infh_liver_problems',\n 'infh_change_in_body_shape',\n 'infh_mental_state_change',\n 'infh_chol_levels_change',\n 'infh_hiv_test',\n 'infh_hiv_counselling',\n 'carf_blood_sugar',\n 'carf_diabetes',\n 'carf_diabetes_12months',\n 'carf_diabetes_treatment',\n 'carf_diabetes_treat_now',\n 'carf_diabetes_treat',\n 'carf_diabetes_traditional',\n 'carf_diabetes_history',\n 'carf_diabetes_mother',\n 'carf_diabetes_father',\n 'carf_diabetes_brother_1',\n 'carf_diabetes_brother_2',\n 'carf_diabetes_brother_3',\n 'carf_diabetes_brother_4',\n 'carf_diabetes_sister_1',\n 'carf_diabetes_sister_2',\n 'carf_diabetes_sister_3',\n 'carf_diabetes_sister_4',\n 'carf_diabetes_son_1',\n 'carf_diabetes_son_2',\n 'carf_diabetes_son_3',\n 'carf_diabetes_son_4',\n 'carf_daughter_diabetes_1',\n 'carf_diabetes_daughter_2',\n 'carf_diabetes_daughter_3',\n 'carf_diabetes_daughter_4',\n 'carf_diabetes_fam_other',\n 'carf_stroke',\n 'carf_tia',\n 'carf_weakness',\n 'carf_numbness',\n 'carf_blindness',\n 'carf_half_vision_loss',\n 'carf_understanding_loss',\n 'carf_expression_loss',\n 'carf_angina',\n 'carf_angina_treatment',\n 'carf_angina_treat_now',\n 'carf_angina_traditional',\n 'carf_pain',\n 'carf_pain2',\n 'carf_pain_action_stopslow',\n 'carf_relief_standstill',\n 'carf_pain_location',\n 'carf_heartattack',\n 'carf_heartattack_treat',\n 'carf_heartattack_trad',\n 'carf_congestiv_heart_fail',\n 'carf_chf_treatment',\n 'carf_chf_treatment_now',\n 'carf_chf_traditional',\n 'carf_bp_measured',\n 'carf_hypertension',\n 'carf_hypertension_12mnths',\n 'carf_hypertension_treat',\n 'carf_hypertension_meds',\n 'carf_hypertension_trad',\n 'carf_cholesterol',\n 'carf_h_cholesterol',\n 'carf_chol_treatment',\n 'carf_chol_treatment_now',\n 'carf_chol_traditional',\n 'carf_thyroid',\n 'carf_thyroid_type',\n 'carf_thyroid_treatment',\n 'carf_thyroid_treat_use',\n 'carf_parents_thyroid',\n 'carf_thyroidparnt_specify',\n 'carf_kidney_disease',\n 'carf_kidney_disease_known',\n 'carf_kidney_function_low',\n 'carf_kidney_family',\n 'carf_kidney_family_mother',\n 'carf_kidney_family_father',\n 'carf_kidney_family_other',\n 'carf_kidney_family_type',\n 'carf_joints_swollen_pain',\n 'carf_joints_swollen',\n 'carf_joints_involved',\n 'carf_when_they_hurt',\n 'carf_symptoms_how_long',\n 'carf_arthritis_results',\n 'carf_rheumatoid_factor',\n 'carf_acpa',\n 'carf_esr_crp',\n 'carf_osteo_sites',\n 'carf_osteo_hip_repl_site',\n 'carf_osteo_knee_repl_site',\n 'gpaq_work_weekend',\n 'gpaq_work_sedentary',\n 'gpaq_work_vigorous',\n 'gpaq_work_vigorous_days',\n 'gpaq_work_moderate',\n 'gpaq_work_moderate_days',\n 'gpaq_transport_phy',\n 'gpaq_transport_phy_days',\n 'gpaq_leisure_phy',\n 'gpaq_leisure_vigorous',\n 'gpaq_leisurevigorous_days',\n 'gpaq_leisuremoderate',\n 'gpaq_leisuremoderate_days',\n 'gpaq_sleep_room_livestock',\n 'gpaq_sleep_on',\n 'gpaq_mosquito_net_use',\n 'gpaq_feel_alert',\n 'gpaq_sleeping_difficulty',\n 'gpaq_difficulty_staysleep',\n 'gpaq_waking_early_problem',\n 
'gpaq_waking_up_tired',\n 'gpaq_sleep_pattern_satis',\n 'gpaq_sleep_interfere',\n 'anth_measurementcollector',\n 'bppm_measurementcollector',\n 'ultr_vat_scat_measured',\n 'ultr_cimt',\n 'ultr_plaque_measured',\n 'ultr_plaque_technician',\n 'ultr_plaque_right_present',\n 'ultr_dxa_scan_completed',\n 'resp_breath_shortness',\n 'resp_breath_shortness_ever',\n 'resp_mucus',\n 'resp_breath_too_short',\n 'resp_cough',\n 'resp_wheezing_whistling',\n 'resp_asthma_diagnosed',\n 'resp_asthma_treat',\n 'resp_asthma_treat_now',\n 'resp_copd_suffer',\n 'resp_copd_treat',\n 'resp_inhaled_medication',\n 'resp_measles_suffer',\n 'rspe_major_surgery',\n 'rspe_chest_pain',\n 'rspe_coughing_blood',\n 'rspe_acute_retinal_detach',\n 'rspe_any_pain',\n 'rspe_diarrhea',\n 'rspe_high_blood_pressure',\n 'rspe_tb_diagnosed',\n 'rspe_tb_treat_past4wks',\n 'rspe_infection',\n 'rspe_participation',\n 'rspe_wearing_tightclothes',\n 'rspe_wearing_dentures',\n 'rspe_researcher_question',\n 'spiro_eligible',\n 'spiro_researcher',\n 'spiro_pass',\n 'rspir_salb_admin',\n 'rspir_researcher',\n 'micr_take_antibiotics',\n 'micr_diarrhea_last_time',\n 'micr_worm_intestine_treat',\n 'micr_probiotics_t_period',\n 'micr_wormintestine_period',\n 'micr_probiotics_taken',\n 'bloc_fasting_confirmed',\n 'bloc_two_red_tubes',\n 'bloc_one_purple_tube',\n 'bloc_one_grey_tube',\n 'bloc_phlebotomist_name',\n 'bloc_urcontainer_batchnum',\n 'bloc_urine_tube_expiry',\n 'bloc_urine_collector',\n 'poc_test_conducted',\n 'poc_instrument_serial_num',\n 'poc_test_strip_batch_num',\n 'poc_teststrip_expiry_date',\n 'poc_researcher_name',\n 'poc_gluc_results_provided',\n 'poc_chol_results_provided',\n 'poc_glucresults_discussed',\n 'poc_cholresults_discussed',\n 'poc_seek_advice',\n 'poc_hiv_test_conducted',\n 'poc_hiv_pre_test',\n 'poc_test_kit_serial_num',\n 'poc_hiv_strip_batch_num',\n 'poc_hiv_strip_expiry_date',\n 'poc_technician_name',\n 'poc_hiv_test_result',\n 'poc_result_provided',\n 'poc_post_test_counselling',\n 'poc_post_test_worker',\n 'poc_hivpositive_firsttime',\n 'poc_hiv_seek_advice',\n 'tram_injury_ill_assault',\n 'tram_relative_ill_injured',\n 'tram_deceased',\n 'tram_family_friend_died',\n 'tram_marital_separation',\n 'tram_broke_relationship',\n 'tram_problem_with_friend',\n 'tram_unemployed',\n 'tram_sacked_from_your_job',\n 'tram_financial_crisis',\n 'tram_problems_with_police',\n 'tram_some_valued_lost']","repo_name":"SBIMB/AWIGEN-2-QC---PYTHON","sub_path":"src/DataAnalyser.py","file_name":"DataAnalyser.py","file_ext":"py","file_size_in_byte":24844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19741006163","text":"from django.contrib import admin\n\n# Register your models here.\nfrom paperQ.models import *\n\nclass QuestionAdmin(admin.ModelAdmin):\n list_display = ('index', 'group', 'correct_answer')\n list_filter = ('group',)\n\nclass PersonAdmin(admin.ModelAdmin):\n list_display = ('name', 'question_group')\n\nclass AnswerAdmin(admin.ModelAdmin):\n list_display = ('q_index', 'group', 'person', 'ans_text')\n list_filter = ('person', 'question',)\n\n def q_index(self, obj):\n return obj.question.index\n \n def group(self, obj):\n return obj.question.group.name\n\n q_index.short_description = '問題番号'\n group.short_description = '問題群'\n\n\n\nadmin.site.register(QuestionGroup)\nadmin.site.register(Question, QuestionAdmin)\nadmin.site.register(Person, PersonAdmin)\nadmin.site.register(Answer, 
+{"seq_id":"2138506967","text":"from openpyxl import *\nfrom random import randint\nfrom math import ceil\n\n\ndef create_dict_from_table(values_dict: dict, row: tuple, start_column_values: int) -> dict:\n    \"\"\" Build a dictionary from the cells whose values are not None; the (initially empty) dictionary is passed in from outside \"\"\"\n\n    for cell in row[start_column_values:]:\n        if cell.value is not None:\n            values_dict.setdefault(cell.coordinate, cell.value)\n    return values_dict\n\n\ndef find_max_and_values(values_dict: dict, limit_value=300) -> tuple[int, int, int, int, list, int]:\n    \"\"\" From the dictionary, pick out the wholesale sales at or above the limit (collecting them into a list),\n        and compute the total sales, the number of sales cells, the wholesale total and the wholesale count\n    \"\"\"\n\n    sum_sales = 0  # Total number of items sold\n    sum_sales_count = 0  # Number of cells with sales\n    opt_sum_sales = 0  # Total number of items sold wholesale\n    opt_sales_count = 0  # Number of cells with wholesale sales\n    opt_sales_list = []  # List of wholesale sales; (cell, value) tuples are appended\n\n    for key, value in values_dict.items():\n        if value >= limit_value:\n            opt_sales_list.append((key, value))\n            opt_sum_sales += value\n            values_dict[key] = randint(0, 2)  # Replace values at or above the set limit with a random 0...2\n            opt_sales_count += 1\n        sum_sales += value\n        sum_sales_count += 1\n\n    clear_sales = sum_sales - opt_sum_sales\n\n    return sum_sales, sum_sales_count, opt_sum_sales, opt_sales_count, opt_sales_list, clear_sales\n\n\ndef spread_values(values_dict: dict, clear_sales: int, sum_sales: int, opt_sum_sales: int, limit_value=300)\\\n        -> tuple[dict, int, int]:\n    \"\"\" Spread the values across the table in proportion to their percentage share. Values above the limit\n        are capped at the limit minus a random 1...10\n    \"\"\"\n\n    new_values_dict = values_dict.copy()\n    sum_values_dict = sum([value for value in new_values_dict.values()])\n\n    for key, value in new_values_dict.items():\n        if value != 1:\n            # Grow each cell by its share of the wholesale total\n            new_value = ceil(new_values_dict[key] + (new_values_dict[key] / clear_sales) * opt_sum_sales)\n            difference = new_value - value\n\n            if new_value < limit_value:\n                if sum_values_dict + difference < sum_sales:\n                    new_values_dict[key] = new_value\n                    sum_values_dict += difference\n                else:\n                    # Do not overshoot the original total: add only what is left and stop\n                    new_diff = sum_sales - sum_values_dict\n                    new_values_dict[key] = value + new_diff\n                    sum_values_dict += new_diff\n                    break\n            else:\n                corr = randint(1, 10)\n                new_value = limit_value - corr\n                difference = new_value - value\n                new_values_dict[key] = new_value\n                sum_values_dict += difference\n\n    # Sum of the new values in the dictionary\n    new_values_dict_sum = sum([value for value in new_values_dict.values()])\n\n    # Remainder left after the spread\n    remain = sum_sales - sum_values_dict\n\n    return new_values_dict, remain, new_values_dict_sum\n\n\ndef spread_remain(new_values_dict: dict, remain: int, limit_value=300) -> tuple[dict, int]:\n    \"\"\" Distribute the remainder (a single pass) \"\"\"\n\n    remain_dec = remain % 10\n    remain_all = (remain // 10) * 10\n\n    for key, value in new_values_dict.items():\n        if remain_dec:\n            if 31 <= value and (value + remain_dec) < limit_value:\n                new_values_dict[key] += remain_dec\n                remain_dec = 0\n        elif remain_all:\n            if remain_all > 25:\n                if 51 <= value and (value + 25) < limit_value:\n                    new_values_dict[key] += 25\n                    remain_all -= 25\n            else:\n                if 51 <= value and (value + remain_all) < limit_value:\n                    new_values_dict[key] += remain_all\n                    remain_all = 0\n                    break\n        else:\n            break  # nothing left to distribute\n\n    # Whatever could not be placed in this pass is returned as the new remainder\n    remain = remain_all + remain_dec\n\n    return new_values_dict, remain\n\n\ndef create_new_file(file_name: str, result_values_list: list[dict]) -> None:\n    \"\"\" Open the source workbook, write the spread values into it and save the result as a new file \"\"\"\n\n    wb = load_workbook(filename=file_name)\n    ws = wb.active\n\n    for new_values_dict in result_values_list:\n        for key, value in new_values_dict.items():\n            ws[key] = value\n\n    wb.save('new_' + file_name)\n\n\ndef create_result_file(result: list[dict]) -> None:\n    \"\"\" Create a file with the spread results for every item in the source file \"\"\"\n\n    wb = Workbook()\n    ws = wb.active\n    ws.title = 'Результаты'\n\n    ws.column_dimensions['A'].width = 40\n    ws.column_dimensions['B'].width = 20\n\n    row_counter = 1\n\n    for item in result:\n        for key, value in item.items():\n            ws.cell(row=row_counter, column=1).value = key\n            ws.cell(row=row_counter, column=2).value = value\n            row_counter += 1\n        row_counter += 1\n\n    wb.save(filename='Результаты.xlsx')\n","repo_name":"oneMayday/Excel-spreader","sub_path":"spreader.py","file_name":"spreader.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}