diff --git "a/3119.jsonl" "b/3119.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3119.jsonl"
@@ -0,0 +1,239 @@
+{"seq_id":"30101188529","text":"# -*- coding:utf-8 -*-\nimport tensorflow as tf\n# import numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport tensorflow_addons as tfa\n\n\nclass Data_set(object):\n def __init__(self, config, shuffle, name, path):\n self.tfrecord_file = config.tfdata_path\n self.batch_size = config.batch_size\n self.min_after_dequeue = config.min_after_dequeue\n self.capacity = config.capacity\n self.actual_image_size = config.train_image_size\n self.shuffle = shuffle\n self.name = name\n\n # Set up the ImageDataGenerator with desired preprocessing\n self.datagen = ImageDataGenerator(\n rescale=1./255,\n )\n self.train_generator = self.datagen.flow_from_directory(\n path,\n target_size=(self.actual_image_size, self.actual_image_size),\n batch_size=self.batch_size,\n shuffle=self.shuffle,\n class_mode='categorical',\n seed=42\n )\n\n def next_iter(self):\n image_batch, label_batch = next(self.train_generator)\n # random rotate\n\n image_batch = tf.convert_to_tensor(image_batch)\n image_batch = tfa.image.rotate(\n image_batch,\n tf.random.uniform(shape=(tf.shape(image_batch)[0], ),\n minval=-0.5,\n maxval=0.5, seed=37),\n interpolation='BILINEAR')\n image_batch = dataaugmentation(image_batch)\n label_batch = tf.convert_to_tensor(label_batch)\n\n return image_batch, label_batch\n\n\ndef dataaugmentation(images):\n images = tf.image.random_flip_up_down(images)\n images = tf.image.random_flip_left_right(images)\n return images\n","repo_name":"tansyab1/PhD-project","sub_path":"2023/Classification/related_works/73/Triple-ANet-master/utilsForTF.py","file_name":"utilsForTF.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
+{"seq_id":"7335753576","text":"\"\"\" \nauthor: Pedro Pessoa Ramos\nemail: pedropessoaramos@gmail.com\nGitHub: https://github.com/O-Pessoa\nTrabalho sobre a cifra de vigenere da matéria de Segurança Computacional da UnB. 
(180026488)\n\nCifra de Vigenere\n\"\"\"\n\nimport sys\nimport argparse\nfrom init import Init\nfrom encrypt import Encrypt\nfrom decrypt import Decrypt\n\nclass Menu:\n def __init__(self):\n try:\n self.parser = argparse.ArgumentParser(description=__doc__,epilog='https://github.com/O-Pessoa', prog='Cifra de Vigenere - 0.1', formatter_class=argparse.RawDescriptionHelpFormatter)\n self.parser.add_argument('-v', '--version', action='version', help='Mostra a versão do programa', version='%(prog)s')\n \n subparsers = self.parser.add_subparsers(required=True, dest='commands', title='commands', help='allowed commands')\n\n self.parserEncrypt = subparsers.add_parser('encrypt', help='Cifrar')\n self.parserEncrypt.add_argument('-k', '--key', type=str, help='Chave da cifra', required=False)\n self.parserEncrypt.add_argument('-o', '--output', type=str, help='Arquivo de saida', required=False)\n group = self.parserEncrypt.add_mutually_exclusive_group(required=True)\n group.add_argument('-m', '--message', type=str, help='Mensagem', required=False)\n group.add_argument('-f', '--file', type=str, help='Arquivo de entrada', required=False)\n\n self.parserDencrypt = subparsers.add_parser('decrypt', help='Decifrar')\n self.parserDencrypt.add_argument('-f', '--file', type=str, help='Arquivo cifrado', required=False)\n self.parserDencrypt.add_argument('-l', '--lang', type=str, help='língua da mensagem original', metavar='pt', default='pt', required=False)\n self.parserDencrypt.add_argument('-o', '--output', type=str, help='Arquivo de saida', required=False)\n\n subparsers.add_parser('init', help='Gerar os arquivos padrões')\n \n self.options = self.parser.parse_args()\n if self.options.commands == 'init':\n Init()\n elif self.options.commands == 'encrypt':\n Encrypt(key=self.options.key, message=self.options.message, output=self.options.output, file=self.options.file)\n elif self.options.commands == 'decrypt':\n Decrypt(file=self.options.file, lang=self.options.lang, output=self.options.output)\n except Exception as err:\n self.showError()\n\n def showError(self):\n self.parser.print_help()\n sys.exit(2)\n ","repo_name":"O-Pessoa/Fuck-Vigenere","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17523001391","text":"import os\n\n# Libs\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib as m\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import register_matplotlib_converters\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.feature_selection import SelectFromModel\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.feature_selection import RFE\n\n# Own modules\nimport utils.data_visualization_functions as vis\nimport utils.data_handling_support_functions as sup\n\n__author__ = 'Alexander Wendt'\n__copyright__ = 'Copyright 2020, Christian Doppler Laboratory for ' \\\n 'Embedded Machine Learning'\n__credits__ = ['']\n__license__ = 'ISC'\n__version__ = '0.2.0'\n__maintainer__ = 'Alexander Wendt'\n__email__ = 'alexander.wendt@tuwien.ac.at'\n__status__ = 'Experiental'\n\nregister_matplotlib_converters()\n\n#Global settings\nnp.set_printoptions(precision=3)\n#Suppress print out in scientific 
notiation\nnp.set_printoptions(suppress=True)\n\nparser = argparse.ArgumentParser(description='Step 3 - Perform feature selection')\nparser.add_argument(\"-conf\", '--config_path', default=\"config/debug_timedata_omxS30.ini\",\n help='Configuration file path', required=False)\n\nargs = parser.parse_args()\n\n\ndef predict_features_simple(X, y):\n '''\n\n\n '''\n\n clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X, y)\n return clf.score(X, y)\n\ndef execute_lasso_feature_selection(X_scaled, y, conf, image_save_directory):\n '''\n\n\n '''\n\n print(\"Feature selection with lasso regression\")\n reg = LassoCV(cv=10, max_iter=100000)\n reg.fit(X_scaled, y)\n coef = pd.Series(reg.coef_, index=X_scaled.columns)\n print(\"Best alpha using built-in LassoCV: %f\" % reg.alpha_)\n print(\"Best score using built-in LassoCV: %f\" % reg.score(X_scaled, y))\n print(\"Lasso picked \" + str(sum(coef != 0)) + \" variables and eliminated the other \" + str(\n sum(coef == 0)) + \" variables\")\n imp_coef = coef.sort_values()\n coefList = list(imp_coef[imp_coef != 0].index)\n print(\"Lasso coefficient list\\n:\", coefList)\n\n # plt.figure()\n m.rcParams['figure.figsize'] = (8.0, 20.0)\n imp_coef.plot(kind=\"barh\")\n plt.title(\"Feature importance using Lasso Model\")\n plt.tight_layout()\n\n vis.save_figure(plt.gcf(), image_save_directory=image_save_directory, filename=\"Lasso_Model_Weights\")\n\n #if image_save_directory:\n # if not os.path.isdir(image_save_directory):\n # os.makedirs(image_save_directory)\n # plt.savefig(os.path.join(image_save_directory, conf['Common'].get('dataset_name') + '_Lasso_Model_Weights'), dpi=300)\n\n #plt.show(block = False)\n\n return coefList\n\ndef execute_treebased_feature_selection(X_scaled, y, conf, image_save_directory):\n '''\n\n\n '''\n\n print(\"Tree based feature selection\")\n clf = ExtraTreesClassifier(n_estimators=50)\n clf = clf.fit(X_scaled, y)\n print(clf.feature_importances_)\n print(\"Best score: %f\" % clf.score(X_scaled, y))\n model = SelectFromModel(clf, prefit=True)\n X_new = model.transform(X_scaled)\n X_new.shape\n\n threshold = 0.010\n tree_coef = pd.Series(clf.feature_importances_, index=X_scaled.columns)\n\n print(\"Tree search picked \" + str(sum(tree_coef >= threshold)) + \" variables and eliminated the other \" + str(\n sum(tree_coef < threshold)) + \" variables\")\n imp_treecoef = tree_coef.sort_values()\n treecoefList = list(imp_treecoef[imp_treecoef > threshold].index)\n\n print(\"Tree based coefficent list:\\n\", treecoefList)\n\n plt.figure()\n m.rcParams['figure.figsize'] = (8.0, 20.0)\n imp_treecoef.plot(kind=\"barh\")\n plt.title(\"Feature importance using Tree Search Model\")\n plt.vlines(threshold, 0, len(X_scaled.columns), color='red')\n plt.tight_layout()\n\n vis.save_figure(plt.gcf(), image_save_directory=image_save_directory, filename=\"Tree_Based_Importance\")\n\n #if image_save_directory:\n # if not os.path.isdir(image_save_directory):\n # os.makedirs(image_save_directory)\n # plt.savefig(os.path.join(image_save_directory, conf['Common'].get('dataset_name') + '_Tree_Based_Importance'), dpi=300)\n\n #plt.show(block = False)\n\n return treecoefList\n\ndef execute_backwardelimination_feature_selection(X_scaled, y):\n '''\n\n\n '''\n\n print(\"Backward elimination\")\n cols = list(X_scaled.columns)\n pmax = 1\n while (len(cols) > 0):\n p = []\n X_1 = X_scaled[cols]\n X_1 = sm.add_constant(X_1)\n model = sm.OLS(y, X_1).fit()\n p = pd.Series(model.pvalues.values[1:], index=cols)\n pmax = max(p)\n 
feature_with_p_max = p.idxmax()\n if (pmax > 0.05):\n cols.remove(feature_with_p_max)\n else:\n break\n selected_features_BE = cols\n\n print(\"Selected features:\")\n print(selected_features_BE)\n print(\"\\nNumber of features={}. Original number of features={}\\n\".format(len(selected_features_BE),\n len(X_scaled.columns)))\n [print(\"column {} removed\".format(x)) for x in X_scaled.columns if x not in selected_features_BE]\n print(\"Finished\")\n\n return selected_features_BE\n\ndef execute_recursive_elimination_feature_selection(X_scaled, y):\n '''\n\n\n '''\n\n print(\"Recursive elimination\")\n model = LogisticRegressionCV(solver='liblinear', cv=3)\n print(\"Start Recursive Elimination. Fit model with {} examples.\".format(X_scaled.shape[0]))\n # Initializing RFE model, 3 features selected\n rfe = RFE(model)\n # Transforming data using RFE\n X_rfe = rfe.fit_transform(X_scaled, y)\n # Fitting the data to model\n model.fit(X_rfe, y)\n\n print(\"Best accuracy score using built-in Logistic Regression: \", model.score(X_rfe, y))\n print(\"Ranking\")\n rfe_coef = pd.Series(X_scaled.columns, index=rfe.ranking_ - 1).sort_index()\n print(rfe_coef)\n\n print(\"Selected columns\")\n print(X_scaled.columns[rfe.support_].values)\n\n return X_scaled.columns[rfe.support_].values, rfe_coef\n\ndef create_feature_list_from_top_features(relevantFeatureList):\n '''\n Use the list of relevant features\n\n\n '''\n ### Weighted values\n # Weights\n values, counts = np.unique(relevantFeatureList, return_counts=True)\n s = pd.Series(index=values, data=counts).sort_values(ascending=False)\n print(s)\n ### Add Manually Selected Subset\n # print subset\n newval = [x for x, c in zip(values, counts) if c > 1]\n subsetColumns = newval # X.columns[rfe.support_].values #list(values)\n\n print(\"Subset columns:\\n\", subsetColumns)\n\n return subsetColumns\n\n\ndef perform_feature_selection_algorithms(features, y, conf, image_save_directory):\n '''\n Perform feature selection\n\n\n '''\n\n # Scale\n # Use this scaler also for the test data at the end\n X_scaled = pd.DataFrame(data=StandardScaler().fit(features).transform(features),\n index=features.index, columns=features.columns)\n\n # Reduce the training set with the number of samples randomly chosen\n X_train_index_subset = sup.get_random_data_subset_index(1000, X_scaled)\n print(\"Scaled data with standard scaler.\")\n print(\"Get a subset of 1000 samples.\")\n\n relevantFeatureList = []\n selected_feature_list = pd.DataFrame()\n\n # Predict with logistic regression\n\n\n ### Lasso Feature Selection\n m.rc_file_defaults() # Reset sns\n coefList = execute_lasso_feature_selection(X_scaled, y, conf, image_save_directory)\n selected_feature_list = selected_feature_list.append(pd.Series(name='Lasso', data=coefList))\n relevantFeatureList.extend(coefList)\n\n print(\"Prediction of training data with logistic regression: {0:.2f}\".format(\n predict_features_simple(X_scaled[coefList], y)))\n\n ### Tree based feature selection\n treecoefList = execute_treebased_feature_selection(X_scaled, y, conf, image_save_directory)\n selected_feature_list = selected_feature_list.append(pd.Series(name='Tree', data=treecoefList))\n relevantFeatureList.extend(treecoefList)\n\n print(\"Prediction of training data with logistic regression: {0:.2f}\".format(\n predict_features_simple(X_scaled[treecoefList], y)))\n\n ### Backward Elimination\n # Backward Elimination - Wrapper method\n selected_features_BE = execute_backwardelimination_feature_selection(X_scaled, y)\n 
relevantFeatureList.extend(selected_features_BE)\n selected_feature_list = selected_feature_list.append(\n pd.Series(name='Backward_Elimination', data=selected_features_BE))\n\n print(\"Prediction of training data with logistic regression: {0:.2f}\".format(\n predict_features_simple(X_scaled[selected_features_BE], y)))\n\n ### Recursive Elimination with Logistic Regression\n # Recursive Elimination - Wrapper method, Feature ranking with recursive feature elimination\n relevant_features, rfe_coef = execute_recursive_elimination_feature_selection(X_scaled.iloc[X_train_index_subset],\n y[X_train_index_subset])\n relevantFeatureList.extend(relevant_features)\n\n step_size = np.round(len(X_scaled.columns) / 4, 0).astype(int)\n for i in range(step_size, len(X_scaled.columns), step_size):\n selected_feature_list = selected_feature_list.append(\n pd.Series(name='RecursiveTop' + str(i), data=rfe_coef.iloc[0:i-1]).reset_index()['RecursiveTop' + str(i)])\n print('Created RecursiveTop{}'.format(str(i)))\n\n ### Add the top coloums from all methods\n top_feature_cols = create_feature_list_from_top_features(relevantFeatureList)\n selected_feature_list = selected_feature_list.append(pd.Series(name='Manual', data=top_feature_cols))\n\n ### Add all columns\n selected_feature_list = selected_feature_list.append(pd.Series(name='All', data=X_scaled.columns))\n\n return selected_feature_list\n\n\ndef main(config_path):\n conf = sup.load_config(config_path)\n features, y, df_y, class_labels = sup.load_features(conf)\n\n image_save_directory = conf['Paths'].get('results_directory') + \"/data_preparation\"\n\n selected_feature_columns_filename = os.path.join(conf['Preparation'].get(\"selected_feature_columns_out\"))\n\n selected_feature_list = perform_feature_selection_algorithms(features, y, conf, image_save_directory)\n\n print(\"List of selected features\")\n print(selected_feature_list.transpose())\n\n selected_feature_list.transpose().to_csv(selected_feature_columns_filename, sep=';', index=False, header=True)\n print(\"Saved selected feature columns to \" + selected_feature_columns_filename)\n\n\nif __name__ == \"__main__\":\n main(args.config_path)\n\n print(\"=== Program end ===\")","repo_name":"alexanderwendt/sklearn_ml_toolbox","sub_path":"step35_perform_feature_selection.py","file_name":"step35_perform_feature_selection.py","file_ext":"py","file_size_in_byte":10882,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"36515348346","text":"# -*- coding: UTF-8 -*-\nfrom pokedex100 import PokeDex100\nfrom telegrambot import telegrambot\nfrom recentpokemon import save_pokemon\nfrom userpref import userpref\n\nRARE_LIST_DEFAULT = [\"togetic\", \"unown\", \"dragonite\", \"tyranita\", \"scyther\", \"lapras\"]\n\ndef parse_ok_cb(data):\n\tdetail = data[\"detail\"]\n\tpname = detail[\"name\"]\n\tregion_detector = pname.split(\" \")[0]\n\tif region_detector == \"Alolan\":\n\t\tpname = pname.split(\" \")[1]\n\n\t# Prepare poke data\n\tnew_url = \"https://pokefree.silverwolfceh.repl.co/showdata?code=%s\" % detail[\"urlcode\"]\n\ttelegram_fmt = \"%s CP%s L%s URL: %s | SHOW: %s\" % (detail[\"name\"], detail[\"cp\"], detail[\"L\"],detail[\"url\"], new_url)\n\tif detail[\"shiny\"]:\n\t\ttelegram_fmt = \"**\" + telegram_fmt\n\tpoke_saved = False\n\n\n\t# Find if any user interested in pokemon\n\tuids = userpref.get_instance().get_pref(pname.lower())\n\tfor usr in uids:\n\t\tif poke_saved == False:\n\t\t\tsave_pokemon(detail)\n\t\t\tpoke_saved = True\n\t\tif 
userpref.get_instance().uid_has_noti(usr):\n\t\t\ttelegrambot.sendMessage(usr, telegram_fmt)\n\n\t# Channel evaluation\n\tif pname.lower() in telegrambot.get_rare_poke() or int(detail[\"L\"]) >= 30 or int(detail[\"cp\"]) >= 2500:\n\t\tif poke_saved == False:\n\t\t\tsave_pokemon(detail)\n\t\t\tpoke_saved = True\n\t\ttelegrambot.sendMessage(\"@tongvuu\", telegram_fmt)\n\telse:\n\t\tprint(\"Pokemon %s not sastify the condition\" % detail[\"name\"] )\n\ndef parse_err_cb(data):\n\tpass\n\ndef pokemon_route(msg):\n\tcname = msg.channel.name\n\tif cname.startswith(\"level30community\") or cname.startswith(\"unown\") or cname.startswith(\"100iv_\"):\n\t\tins = PokeDex100.getInstance()\n\t\tins.parse(msg, parse_ok_cb, parse_err_cb)\n\ndef reveal_cords(url):\n\tins = PokeDex100.getInstance()\n\treturn ins.reveal_cords(url)\n","repo_name":"silverwolfceh/pokefree","sub_path":"pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35652425860","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 28 14:43:39 2019\r\n\r\n@author: Sk Mobin\r\n\"\"\"\r\n\r\nfrom urllib.parse import urljoin\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\nurl = \"https://www.biscosuisse.ch/de/uber-uns/mitglieder/\"\r\nreq = requests.get(url)\r\nsoup = BeautifulSoup(req.text, 'lxml')\r\ntab = soup.findAll('div', {'class': 'container-fluid'})\r\n\r\nfile = open('assignment.csv', 'w')\r\nheader = \"Name, Email, Phone, Fax, Website\\n\"\r\nfile.write(header)\r\n\r\nfor tabb in tab:\r\n name = tabb.findAll('h3')[0].text\r\n div = tabb.findAll('div', {'class': 'col-md-4'})\r\n try:\r\n tel, fax = div[1].text.lstrip().rstrip().split('\\n')\r\n except:\r\n tel = div[1].text.lstrip().rstrip()\r\n fax = 'NaN'\r\n #print(tel.replace('Tel.', ''), fax.replace('Fax ', ''))\r\n aas = div[2].findAll('a')\r\n if(len(aas) == 2):\r\n email = aas[0].attrs['href'].replace('mailto:', '')\r\n web = aas[1].attrs['href']\r\n else:\r\n web = aas[0].attrs['href']\r\n email = 'NaN'\r\n print(email, web)\r\n file.write(name + ', ' + email + ', ' + tel + ', ' + fax + ', ' + web + '\\n')\r\nfile.close()","repo_name":"manzartoflo/biscosuisse","sub_path":"assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70689875189","text":"#Tyler Moore\n#Starter code for HW5 Q1\n\n#Justin McNiel and David Fantin\n\ndef read_playlist(filename):\n\t\"\"\"\n\tInput: filename of CSV file listing (song,artist,genre) triples\n\tOutput: List of (song,artist,genre)\n\t\"\"\"\n\tplaylist = []\n\tfor line in open(filename):\n\t\tbits = [b.strip() for b in line.split(',')]\n\t\tplaylist.append(bits)\n\treturn playlist\n\ndef iter_playlist_transform(s,t,compareType=\"Song\"):\n\t\"\"\"\n\tComputes the edit distance for two playlists s and t, and prints the minimal edits \n\t required to transform playlist s into playlist t.\n\tInputs:\n\ts: 1st playlist (format: list of (track name, artist, genre) triples)\n\tt: 2nd playlist (format: list of (track name, artist, genre) triples)\n\tcompareType: String indicating the type of comparison to make.\n\t \"Song\" (default): songs in a playlist are considered equivalent if the \n\t\t (song name, artist, genre) triples match.\n\t \"Genre\": songs in a playlist are considered equivalent if the same genre is used.\n\t \"Artist\": songs in a playlist 
are considered equivalent if the same artist is used.\n\tOutput: The minimum edit distance and the minimal edits required to transform playlist\n\t s into playlist t.\n\t\"\"\"\n\tpass\n\t\n\tcompare_operations = ['song', 'artist', 'genre']\n\tcompare_index = compare_operations.index(compareType.lower())\n\t\n\tdef equivalent(song1, song2):\n\t\tif compare_index == 0:\n\t\t\treturn song1 == song2\n\t\telse:\n\t\t\treturn song1[compare_index] == song2[compare_index]\n\t\n\tC = []\n\t\n\ttmp = [[]]\n\ttmp.extend(s)\n\ts = tmp\n\t\n\ttmp = [[]]\n\ttmp.extend(t)\n\tt = tmp\n\t\n\tedits = {}\n\t\n\tC.append([x for x in range(len(t)+1)])\n\tfor i in range(len(s)):\n\t\tC.append([i+1])\n\t\tedits[i] = {}\n\t\tif i != 0:\n\t\t\tedits[i][0] = (\"Remove %s\\n\"%(str(s[i])),2)\n\t\n\tfor j in range(1,len(t)):\n\t\tedits[0][j] = (\"Insert %s\\n\"%(str(t[j])),1)\n\t\t\n\tfor i in range(1, len(s)):\n\t\tfor j in range(1, len(t)):\n\t\t\tc_match, c_ins, c_del = None, None, None\n\t\t\tmatchEdit = \"\"\n\t\t\tif equivalent(s[i],t[j]):\n\t\t\t\tc_match = C[i-1][j-1]\n\t\t\t\tmatchEdit += \"Leave %s\\n\"%(str(s[i]))\n\t\t\telse:\n\t\t\t\tc_match = C[i-1][j-1]+1\n\t\t\t\tmatchEdit += \"Replace %s with %s\\n\"%(str(s[i]),str(t[j]))\n\t\t\t\n\t\t\tc_ins = C[i][j-1]+1\n\t\t\tc_del = C[i-1][j]+1\n\t\t\t\n\t\t\tc_min = min(c_match, c_ins, c_del)\n\t\t\t\n\t\t\tif c_min == c_match:\n\t\t\t\tedits[i][j] = (matchEdit,0)\n\t\t\telif c_min == c_ins:\n\t\t\t\tedits[i][j] = (\"Insert %s\\n\"%(str(t[j])),1)\n\t\t\telif c_min == c_del:\n\t\t\t\tedits[i][j] = (\"Remove %s\\n\"%(str(s[i])),2)\n\t\t\telse:\n\t\t\t\traise Exception('wut')\n\t\t\t\n\t\t\tC[i].append(c_min)\n\t\t\t\n\tprint(\"%s edits required to turn playlist 1 into playlist 2.\"%C[i][j])\n\t\n\ttmpI, tmpJ = i, j\n\t\n\toutputLst = [edits[i][j][0]]\n\t\n\twhile i != 0 or j != 0:\n\t\tcode = edits[i][j][1]\n\t\tif code == 0:\n\t\t\ti -= 1\n\t\t\tj -= 1\n\t\telif code == 1:\n\t\t\ti -= 1\n\t\telif code == 2:\n\t\t\tj -= 1\n\t\t\n\t\tif i==0 and j==0:\n\t\t\tbreak\n\t\t\n\t\ttry:\n\t\t\toutputLst.append(edits[i][j][0])\n\t\texcept KeyError:\n\t\t\t'''for x in range(10):\n\t\t\t\tprint('')\n\t\t\tprint(outputLst, i, j)\n\t\t\t\n\t\t\tfor x in edits.keys():\n\t\t\t\tprint(x, edits[x])'''\n\t\t\tbreak\n\t\n\toutputLst.reverse()\n\t\n\tprint(''.join(outputLst))\n\t\n\ti, j = tmpI, tmpJ\n\t\n\treturn C[i][j]\n\t\t\n\n\nif __name__==\"__main__\":\n\t#obtain local copy from http://secon.utulsa.edu/cs2123/blues1.csv\n\tb1 = read_playlist(\"blues1.csv\")\n\t#obtain local copy from http://secon.utulsa.edu/cs2123/blues2.csv\n\tb2 = read_playlist(\"blues2.csv\")\n\tprint(\"Playlist 1\")\n\tfor song in b1:\n\t\tprint(song)\n\tprint(\"Playlist 2\")\n\tfor song in b2:\n\t\tprint(song)\n\tprint(\"Comparing playlist similarity by song\")\n\titer_playlist_transform(b1,b2)\n\tprint(\"Comparing playlist similarity by genre\")\n\titer_playlist_transform(b1,b2,\"Genre\")\n\tprint(\"Comparing playlist similarity by artist\")\n\titer_playlist_transform(b1,b2,\"Artist\")\n\t#include your own playlists below\n\t\n\tprint(\"\\n\\n\\n\\n\")\n\tp1 = read_playlist(\"Playlist1.csv\")\n\tp2 = read_playlist(\"Playlist2.csv\")\n\tprint(\"Playlist 1\")\n\tfor song in p1:\n\t\tprint(song)\n\tprint(\"Playlist 2\")\n\tfor song in p2:\n\t\tprint(song)\n\tprint(\"Comparing playlist similarity by song\")\n\titer_playlist_transform(p1,p2)\n\tprint(\"Comparing playlist similarity by genre\")\n\titer_playlist_transform(p1,p2,\"Genre\")\n\tprint(\"Comparing playlist similarity by 
artist\")\n\titer_playlist_transform(p1,p2,\"Artist\")","repo_name":"DavidFantin/UndergradProjects","sub_path":"CS-2123_DataStructures--Projects/workspace-CS-2123-Python/P5/Final Submission/P5.py","file_name":"P5.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"43091230887","text":"# Description:\n# Author:朱勇\n# Time:2021/3/6 10:41\nimport pandas as pd\nimport numpy as np\nfrom sklearn.naive_bayes import CategoricalNB\nfrom sklearn.metrics import accuracy_score\n\ndata = pd.read_csv(\"task2_data.csv\")\nx = data.drop([\"y\"],axis=1)\ny = data.loc[:,\"y\"]\nmodel = CategoricalNB()\nmodel.fit(x,y)\ny_predict = model.predict(x)\nprint(accuracy_score(y,y_predict))\nx_test = np.array([[2,1,1,1,1],[2,1,1,1,0],[2,1,1,0,0],[2,1,0,0,0],[2,0,0,0,0]])\ny_test_predict_prob = model.predict_proba(x_test)\ny_test_predict = model.predict(x_test)\n#数据组合\ntest_data_result = np.concatenate((x_test,y_test_predict_prob,y_test_predict.reshape(5,1)),axis=1)\n#格式转化\ntest_data_result = pd.DataFrame(test_data_result)\n#列名称替换\ntest_data_result.columns = [\"score\",\"school\",\"award\",\"gender\",\"English\",\"p0\",\"p1\",\"p2\",\"y_test_predict\"]\ntest_data_result.to_csv(\"test_data_result.csv\")","repo_name":"zhuyongWillem/other_classification","sub_path":"bayes_application.py","file_name":"bayes_application.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"19292287020","text":"############DEBUGGING#####################\n\n# #Print is Your Friend\n# pages = 0\n# word_per_page = 0\n# pages = int(input(\"Number of pages: \"))\n# word_per_page == int(input(\"Number of words per page: \"))\n# total_words = pages * word_per_page\n# print(total_words)\n\n# Tem que imprimir no console as variáveis para saber qual o valor está sendo recebido, no caso específico está sendo recebido o valor '0' declarado inicialmente nas duas primeiras linhas do código, basta apagá-las.\n\n# A varíavel word_per_page está utilizando o sinal de igual duplo, que é utilizado somente para condições, só apagar um também.\n\n\n# #Print is Your Friend\npages = int(input(\"Number of pages: \"))\nword_per_page = int(input(\"Number of words per page: \"))\n#print(pages)\n#print(word_per_page)\ntotal_words = pages * word_per_page\nprint(total_words)","repo_name":"Raul-Albuquerque/Curso-PYTHON","sub_path":"day 13/5. Print_is_your_friend.py","file_name":"5. 
Print_is_your_friend.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17310596299","text":"from django.shortcuts import render, redirect\nfrom django.utils import timezone\nfrom .models import Post #connect Post model to this view code\nfrom django.shortcuts import render, get_object_or_404\nfrom .forms import PostForm\n\n\n# Create your views here.\n\n\n\ndef post_list(request): #take a request arguement\n\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n return render(request, 'blog/post_list.html', {'posts':posts}) #renders the request on the html page\n\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n\ndef post_new(request):\n\n if request.method == \"POST\": #if the request method is equal to POST\n form = PostForm(request.POST) #form is defined as postform object with POST passed in\n if form.is_valid():\n post = form.save(commit=False) #save the form\n post.author = request.user #set author to user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk) #redirect page to post\n else:\n form = PostForm() #create form variable as a new PostForm\n\n return render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, 'blog/post_edit.html', {'form': form})","repo_name":"michaelNinh/my-first-blog","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"13486613345","text":"#!/usr/bin/python\n# emailer.py -- https://github.com/stooged/Klipper-Plugins/blob/main/non%20integrated/emailer.py\nimport time\nimport datetime\nimport websocket # websocket-client\nfrom PIL import Image # Pillow\nimport requests # requests\nfrom pathlib import Path\nimport configparser\nimport json\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nfrom io import BytesIO\ntry:\n import thread\nexcept ImportError:\n import _thread as thread\n\nprinting = False\ngfilename = None\n\ncfg_file = Path(\"/home/pi/printer_data/config/emailer.cfg\")\nif cfg_file.is_file():\n config = configparser.ConfigParser()\n config.read(\"/home/pi/printer_data/config/emailer.cfg\")\n send_email_notifications = config.getboolean(\"emailer\", \"send_email_notifications\", fallback=False)\n machine_name = config.get(\"emailer\", \"machine_name\")\n send_image = config.getboolean(\"emailer\", \"send_image\", fallback=False)\n sender_email = config.get(\"emailer\", \"sender_email\")\n sender_password = config.get(\"emailer\", \"sender_password\")\n receiver_email = config.get(\"emailer\", \"receiver_email\")\n smtp_host = config.get(\"emailer\", \"smtp_host\")\n smtp_port = config.getint(\"emailer\", \"smtp_port\", fallback=587)\n\nelse: #hard codeed values used if emailer.cfg is missing\n\n machine_name = \"Printer\" # printer name used in 
email\n send_email_notifications = True # enable email notification on print complete and error\n send_image = False # include snapshot from webcam with email notification\n sender_email = \"email@email.com\" # email to send from\n sender_password = \"password\" # password for sending email\n receiver_email = \"email@email.com\" # email to send to\n smtp_host = \"smtp.email.com\" # smtp host for email provider\n smtp_port = 587 # smtp port\n\n\n\ndef subscribe():\n return {\n \"jsonrpc\": \"2.0\",\n \"method\": \"printer.objects.subscribe\",\n \"params\": {\n \"objects\": {\n \"print_stats\": [\"filename\", \"state\", \"print_duration\", \"message\"]\n }\n },\n \"id\": \"8437\"\n }\n\n\n\ndef send_email(status, filename, dura):\n try:\n if printing == True and send_email_notifications == True:\n subject = machine_name + \": \" + status\n if filename == None:\n filename = \"Print\" \n body = filename + \" \" + status + \" in \" + str(datetime.timedelta(seconds=int(dura)))\n msg = MIMEMultipart()\n msg[\"From\"] = sender_email\n msg[\"To\"] = receiver_email\n msg[\"Subject\"] = subject\n msg.attach(MIMEText(body, \"plain\"))\n if send_image == True:\n response = requests.get(\"http://127.0.0.1/webcam/?action=snapshot\") \n with Image.open(BytesIO(response.content)) as img:\n img_bytes = BytesIO()\n img.save(img_bytes, format='JPEG')\n img_bytes = img_bytes.getvalue()\n attachment = MIMEImage(img_bytes)\n attachment.add_header('Content-Disposition', 'attachment', filename='image.jpg')\n msg.attach(attachment)\n smtp_session = smtplib.SMTP(smtp_host, smtp_port)\n smtp_session.starttls()\n smtp_session.login(sender_email, sender_password)\n smtp_session.sendmail(sender_email, receiver_email, msg.as_string())\n smtp_session.quit() \n except Exception:\n pass\n\n\ndef parse_json(json_obj, message):\n global printing\n global gfilename\n if \"print_stats\" in json_obj:\n print_stats = json_obj[\"print_stats\"]\n if \"filename\" in print_stats:\n gfilename = print_stats[\"filename\"] \n if \"state\" in print_stats:\n state = print_stats[\"state\"]\n if state == \"printing\" and printing == False:\n printing = True\n if state == \"complete\":\n send_email(state, gfilename, print_stats[\"print_duration\"])\n printing = False\n if state == \"error\":\n send_email(print_stats[\"message\"], \"ERROR: \", print_stats[\"print_duration\"])\n printing = False \n if state == \"cancelled\":\n printing = False \n\n\ndef on_message(ws, message):\n \n if \"notify_klippy_ready\" in message:\n ws.send(json.dumps(subscribe()))\n if \"Klipper state: Ready\" in message:\n ws.send(json.dumps(subscribe()))\n if \"jsonrpc\" in message:\n python_json_obj = json.loads(message)\n if \"result\" in python_json_obj and \"status\" in python_json_obj[\"result\"]:\n parse_json(python_json_obj[\"result\"][\"status\"], message)\n if \"method\" in python_json_obj and python_json_obj[\"method\"] == \"notify_status_update\":\n parse_json(python_json_obj[\"params\"][0], message)\n \n\n\ndef on_error(ws, error):\n print(\"Error: \" + str(error))\n\n\ndef on_close(ws):\n time.sleep(10)\n connect_websocket() \n\n\ndef on_open(ws):\n def run(*args):\n for i in range(1):\n time.sleep(1)\n ws.send(json.dumps(subscribe()))\n time.sleep(5)\n thread.start_new_thread(run, ())\n\n\ndef connect_websocket():\n def run(*args):\n time.sleep(30)\n ws = websocket.WebSocketApp(f\"ws://127.0.0.1:7125/websocket\", on_message=on_message, on_error=on_error, on_close=on_close)\n ws.on_open = on_open\n ws.run_forever()\n thread.start_new_thread(run, ())\n 
\nconnect_websocket()\n\n\nwhile True:\n time.sleep(1)","repo_name":"stooged/Klipper-Plugins","sub_path":"non integrated/emailer.py","file_name":"emailer.py","file_ext":"py","file_size_in_byte":5702,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"21130009072","text":"'''\nCreated on 24 de jan de 2017\n\n@author: Fellipe Duarte\n'''\nimport numpy as np\nfrom time import time\nimport pandas as pd\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.grid_search import ParameterGrid\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.externals.joblib.parallel import delayed, Parallel\n\nfrom datasets.extractors import short_plagiarised_answers_extractor, pan_plagiarism_corpus_2010_extractor, pan_plagiarism_corpus_2011_extractor\nfrom locality_sensitive_hashing import LSHTransformer, minmax_hashing, LSHIINearestNeighbors, InvertedIndexNearestNeighbors, PBINearestNeighbors, min_hashing, minmaxCSALowerBound_hashing,minmaxCSAFullBound_hashing, justCSALowerBound_hashing, justCSAFullBound_hashing\nfrom scipy.sparse.lil import lil_matrix\nfrom scipy.sparse.csr import csr_matrix\nfrom scipy.sparse.base import issparse\nimport pickle\nimport os.path\nfrom scipy import vstack\nfrom datetime import datetime\n \ndef encode_dataframe_content(dataframe_contenti, encoding):\n return dataframe_contenti.encode(encoding) \n\ndef parameters_gridlist_dataframe(parameters_dict):\n '''\n parameters_dict is a (sklearn) Pipeline parameters dict \n \n returns a (pandas) Dataframe containing a list of parameters' grid from parameters_dict. \n '''\n grid_list = list(ParameterGrid(parameters_dict))\n\n df = pd.DataFrame(grid_list)\n\n# print(grid_list) \n# print(df.to_dict(\"records\"))\n# print(df.describe)\n# exit()\n \n return df\n\ndef h5_results_filename(dataset_name,result_type,dataframe_pos):\n h5_file_path = \"%s_%s_%d_results.h5\"%(dataset_name,result_type,dataframe_pos)\n return h5_file_path\n\ndef sparse_matrix_to_hdf(sparse_matrix,name_to_store,hdf_file_path):\n nonzero_indices = np.nonzero(sparse_matrix>0)\n if len(nonzero_indices[0]) == 0:\n raise Exception(\"can't store empty sparse matrix!\")\n \n if issparse(sparse_matrix):\n if sparse_matrix.__class__ is lil_matrix:\n nonzero_values = sparse_matrix.tocsr()[nonzero_indices].A1\n else:\n nonzero_values = lil_matrix(sparse_matrix).tocsr()[nonzero_indices].A1\n else:\n nonzero_values = np.array(sparse_matrix[nonzero_indices])\n\n# print(sparse_matrix.__class__,'=',name_to_store,sparse_matrix.shape,len(nonzero_values))\n \n matrix_dataframe = pd.DataFrame({\n \"row_indexes\":nonzero_indices[0],\n \"col_indexes\":nonzero_indices[1],\n \"data\":nonzero_values})\n matrix_shape_series = pd.Series(sparse_matrix.shape)\n \n matrix_dataframe.to_hdf(hdf_file_path, name_to_store)\n matrix_shape_series.to_hdf(hdf_file_path, \"%s_shape\"%name_to_store)\n \n del nonzero_indices,nonzero_values,matrix_dataframe,matrix_shape_series\n\ndef hdf_to_sparse_matrix(name_to_load,hdf_file_path):\n matrix_dataframe = pd.read_hdf(hdf_file_path, name_to_load)\n matrix_shape_series = pd.read_hdf(hdf_file_path, \"%s_shape\"%name_to_load)\n\n col = matrix_dataframe.loc[:,'col_indexes'].values.tolist()\n row = matrix_dataframe.loc[:,'row_indexes'].values.tolist()\n data = matrix_dataframe.loc[:,'data'].values.tolist()\n\n# print(np.array(col).shape)\n# print(np.array(row).shape)\n# print(np.array(data).shape)\n# print(matrix_shape_series.values.tolist())\n\n sparse_matrix = csr_matrix((data, (row, 
col)), shape=matrix_shape_series.values.tolist())\n\n del col,row,data, matrix_dataframe, matrix_shape_series\n \n return sparse_matrix\n\ndef tokenize_by_parameters(documents,queries,target,dataset_name, cv_parameters_dataframe_line,cv_parameters_dataframe_line_index,encoding):\n '''\n tokenize and store results \n '''\n start_time = time()\n file_path = h5_results_filename(dataset_name, 'cv', cv_parameters_dataframe_line_index) #\"%s_cv_%d_results.h5\"%(dataset_name,cv_parameters_dataframe_line_index)\n \n if os.path.exists(file_path):\n print(file_path,' already exists!')\n else:\n print('start:',file_path)\n pipe_to_exec = Pipeline([(\"cv\",TfidfVectorizer(binary=True, encoding=dataset_encoding))])\n parameters = cv_parameters_dataframe_line.to_dict()\n parameters['cv__encoding'] = encoding\n \n pipe_to_exec.set_params(**cv_parameters_dataframe_line)\n \n t0 = time()\n td_documents = pipe_to_exec.fit_transform(documents, None)\n documents_elapsed_time = (time()-t0) / td_documents.shape[0]\n sparse_matrix_to_hdf(td_documents,'documents',file_path)\n print('documents:',td_documents.shape)\n del td_documents\n \n t0 = time()\n td_queries = pipe_to_exec.transform(queries)\n queries_elapsed_time = (time()-t0) / td_queries.shape[0]\n sparse_matrix_to_hdf(td_queries,'queries',file_path)\n print('queries:',td_queries.shape)\n del td_queries\n \n sparse_matrix_to_hdf(target,'targets',file_path)\n \n time_dataframe = pd.DataFrame({\n 'documents_mean_time' : [documents_elapsed_time],\n 'queries_mean_time' : [queries_elapsed_time],\n })\n time_dataframe.to_hdf(file_path.replace('results.h5', 'time.h5'), 'time_dataframe')\n \n del time_dataframe \n with open(file_path.replace('results.h5', 'vocabulary.pkl'),'wb') as f:\n pickle.dump(pipe_to_exec.steps[0][1].vocabulary_,f)\n \n del pipe_to_exec\n \n print('end:',file_path, \"in %4.2f s\"%(time()-start_time)) \n\n d = hdf_to_sparse_matrix('documents', file_path)\n \n for i in range(d.shape[0]):\n a = d[i,:].sum()\n if a ==0 :\n print(i,'/',d.shape[0])\n input('vazio!')\n print(documents[i])\n\n d = hdf_to_sparse_matrix('queries', file_path)\n \n for i in range(d.shape[0]):\n a = d[i,:].sum()\n if a ==0 :\n print(i,'/',d.shape[0])\n input('vazio!')\n print(queries[i])\n \n\ndef lsh_transform(dataset_name, lsht_parameters_dataframe_line, lsth_parameters_dataframe_line_index, encoding):\n indexi = lsht_parameters_dataframe_line['input__filename_index']\n source_file_path = h5_results_filename(dataset_name, 'cv', indexi)\n file_path = h5_results_filename(dataset_name, 'lsht', lsth_parameters_dataframe_line_index)\n \n if os.path.exists(file_path) :\n print(file_path,' already exists!')\n else: \n pipe_to_exec = Pipeline([('lsht',LSHTransformer(n_permutations=1,n_jobs=0))])\n pipe_to_exec.set_params(**lsht_parameters_dataframe_line.drop('input__filename_index'))\n \n print(lsht_parameters_dataframe_line.drop('input__filename_index'))\n \n \n d = hdf_to_sparse_matrix('documents', source_file_path)\n\n d_line,d_time = pipe_to_exec.fit_transform(d, None)\n sparse_matrix_to_hdf(d_line,'documents',file_path) \n sparse_matrix_to_hdf(d_time,'documents_time',file_path.replace('results.h5', 'time.h5'))\n print(d_line.shape, \"in %f[+/-%4.2f] s\"%(d_time.mean(),d_time.std()))\n \n del d,d_line,d_time\n \n q = hdf_to_sparse_matrix('queries', source_file_path)\n\n q_line,q_time = pipe_to_exec.transform(q)\n sparse_matrix_to_hdf(q_line,'queries',file_path)\n sparse_matrix_to_hdf(q_time,'queries_time',file_path.replace('results.h5', 'time.h5'))\n print(q_line.shape, 
\"in %f[+/-%4.2f] s\"%(q_time.mean(),q_time.std()))\n \n del q,q_line,q_time\n \n t = hdf_to_sparse_matrix('targets', source_file_path)\n sparse_matrix_to_hdf(t,'targets',file_path)\n del t\n\ndef __nearest_neighbors_search(pipe_to_exec,source_file_path,file_path):\n '''\n runs \"pipe_to_exec\" nearest neighbors search estimator\n \n parameters: \n \n * source_file_path : hdf file in which input documents, queries and targets are stored\n * file_path: hdf filename where nns results will be stored\n '''\n \n# print(linei.describe)\n \n d = hdf_to_sparse_matrix('documents', source_file_path)\n pipe_to_exec.fit(d, None)\n d_mean_time = pipe_to_exec.steps[0][1].fit_time\n \n print(\"fitted in %f s\"%(d_mean_time))\n \n del d\n \n q = hdf_to_sparse_matrix('queries', source_file_path)\n d_indices,qd_distances,q_mean_time = pipe_to_exec.transform(q)\n \n# print(\"mean retrieval time %f s\"%(q_mean_time))\n \n time_dataframe = pd.DataFrame({\n 'documents_mean_time' : [d_mean_time],\n 'queries_mean_time' : [q_mean_time],\n })\n \n '''\n storing nearest neighbors search results\n '''\n time_dataframe.to_hdf(file_path.replace('results.h5', 'time.h5'), 'time_dataframe')\n sparse_matrix_to_hdf(d_indices,'retrieved_docs',file_path)\n sparse_matrix_to_hdf(lil_matrix(qd_distances),'qd_distances',file_path)\n \n del q, d_mean_time, q_mean_time, qd_distances, time_dataframe\n \n '''\n Evaluating results in terms of Precision, Recalls and MAP.\n '''\n\n t = hdf_to_sparse_matrix('targets', source_file_path)\n \n retrieved_relevants = []\n for q_index in range(d_indices.shape[0]):\n q_retrieved_relevants = np.cumsum(t[q_index,d_indices[q_index,:]].A,axis=1)\n retrieved_relevants.append(q_retrieved_relevants)\n \n retrieved_relevants = vstack(retrieved_relevants)\n \n '''\n broadcasting\n ''' \n approachi_recalls = np.divide(retrieved_relevants,np.matrix(t.sum(axis=1)))\n ranking_sum = np.multiply(np.ones(retrieved_relevants.shape),np.matrix(range(1,retrieved_relevants.shape[1]+1)))\n approachi_precisions = np.divide(retrieved_relevants,ranking_sum)\n \n average_precision = np.zeros((d_indices.shape[0],1))\n for q_index in range(d_indices.shape[0]):\n relevants_precision = np.multiply(approachi_precisions[q_index,:],t[q_index,d_indices[q_index,:]].A)\n average_precision[q_index,0] = relevants_precision.mean(axis=1)\n# print(q_index,'.MAP =',average_precision[q_index,0]) \n\n# print(t.sum(axis=1))\n# print(retrieved_relevants)\n del d_indices, retrieved_relevants\n\n# print(\"MAP=\",average_precision.mean(),average_precision.std(),'precision.sum=',average_precision.sum())\n# print(\"recalls.sum = \",approachi_recalls.sum(),'| mean = ',approachi_recalls.sum()/(approachi_recalls.shape[0]*approachi_recalls.shape[1]))\n \n for to_store,to_store_name in [(approachi_precisions,'precisions'),(approachi_recalls,'recalls'),(average_precision,'average_precisions')]:\n if not issparse(to_store):\n to_store = csr_matrix(to_store)\n sparse_matrix_to_hdf(to_store,to_store_name,file_path.replace('results','results_evaluation'))\n \n del to_store\n\ndef lsh_nearest_neighbors_search(dataset_name, lshnns_parameters_dataframe_line, lshnns_parameters_dataframe_line_index, encoding):\n indexi = lshnns_parameters_dataframe_line['input__filename_index']\n source_file_path = h5_results_filename(dataset_name, 'lsht', indexi)\n file_path = h5_results_filename(dataset_name, 'lshnns', lshnns_parameters_dataframe_line_index)\n \n print(lshnns_parameters_dataframe_line)\n\n if os.path.exists(file_path):\n# if os.path.exists(file_path):\n 
print(file_path,' already exists!')\n else: \n pipe_to_exec = Pipeline([('lshnns',LSHIINearestNeighbors(n_neighbors=2))])\n pipe_to_exec.set_params(**lshnns_parameters_dataframe_line.drop('input__filename_index'))\n \n __nearest_neighbors_search(pipe_to_exec, source_file_path, file_path)\n\ndef pbinearest_neighbors_search(dataset_name, nns_parameters_dataframe_line, nns_parameters_dataframe_line_index, encoding):\n indexi = nns_parameters_dataframe_line['input__filename_index']\n source_file_path = h5_results_filename(dataset_name, 'cv', indexi)\n file_path = h5_results_filename(dataset_name, 'pbinns', nns_parameters_dataframe_line_index)\n\n if os.path.exists(file_path):\n print(file_path,' already exists!')\n else: \n pipe_to_exec = Pipeline([('pbinns',PBINearestNeighbors(bucket_count=2, reference_set_size=2, prunning_size=1, ref_sel_threshold=0.5, n_neighbors=2, sort_neighbors=False))])\n pipe_to_exec.set_params(**nns_parameters_dataframe_line.drop('input__filename_index'))\n \n __nearest_neighbors_search(pipe_to_exec, source_file_path, file_path)\n \ndef nearest_neighbors_search(dataset_name, nns_parameters_dataframe_line, nns_parameters_dataframe_line_index, encoding):\n indexi = nns_parameters_dataframe_line['input__filename_index']\n source_file_path = h5_results_filename(dataset_name, 'cv', indexi)\n file_path = h5_results_filename(dataset_name, 'nns', nns_parameters_dataframe_line_index)\n\n# print(nns_parameters_dataframe_line)\n \n if os.path.exists(file_path) :\n print(file_path,' already exists!')\n else: \n pipe_to_exec = Pipeline([('nns',InvertedIndexNearestNeighbors(n_neighbors=2))])\n pipe_to_exec.set_params(**nns_parameters_dataframe_line.drop('input__filename_index'))\n\n __nearest_neighbors_search(pipe_to_exec, source_file_path, file_path)\n\ndef generate_or_load_parameters_grids(parameters_sequence,dataset_name):\n parameters_grids_list = []\n\n for i in range(len(parameters_sequence)):\n sufix,_parameters = parameters_sequence[i]\n dataframe_df_path = \"%s_parameters_%s.h5\"%(dataset_name,sufix)\n \n dataframe_df_paramaters = parameters_gridlist_dataframe(_parameters).drop_duplicates()\n\n '''\n search and add new parameters!\n '''\n if os.path.exists(dataframe_df_path):\n print(dataframe_df_path,'exists! merging with new values!')\n\n current_df = pd.read_hdf(dataframe_df_path, \"parameters\")\n \n '''\n getting what already exists!\n '''\n\n intersection = pd.merge(current_df,dataframe_df_paramaters)\n \n \n if intersection.shape[0] > 0:\n for _,rowi in dataframe_df_paramaters.iterrows():\n inter_i = pd.merge(pd.DataFrame([rowi]),intersection)\n \n if inter_i.shape[0] == 0:\n temp = pd.DataFrame([rowi])\n temp.index =[current_df.shape[0]]\n current_df = current_df.append(temp)\n else:\n temp = dataframe_df_paramaters\n temp.index = list(range(current_df.shape[0],current_df.shape[0] + dataframe_df_paramaters.shape[0]))\n# print(temp.index)\n current_df = current_df.append(temp)\n else:\n current_df = dataframe_df_paramaters\n\n current_df.to_hdf(dataframe_df_path, \"parameters\")\n \n '''\n preserving original index! 
\n '''\n current_df.loc[list(current_df.index),'%s_file_index'%(sufix)] = list(current_df.index)\n parameters_grids_list.append(pd.merge(current_df,dataframe_df_paramaters))\n parameters_grids_list[-1] = parameters_grids_list[-1].set_index('%s_file_index'%(sufix),drop=True)\n \n if i < len(parameters_sequence) - 1:\n parameters_sequence[i+1][1]['input__filename_index'] = parameters_grids_list[-1].index\n \n return parameters_grids_list\n\n\n\nif __name__ == '__main__':\n \n '''\n creating TfIdfVectorizer, LSHTransformer and LSHIINearestNeighbors parameters grids and\n storing it as pandas Dataframes on hdf\n '''\n# dataset_name = \"psa\"\n# dataset_name = \"pan10\"\n dataset_name = \"pan11\"\n\n# dataset_name,sample_size = \"pan10-%d-samples\",10 \n# dataset_name = dataset_name%(sample_size)\n \n cv_parameters = {\n \"cv__analyzer\" : ('word',),\n \"cv__ngram_range\" : (\n (1,1),\n# (3,3), \n# (5,5), \n# (1,3),\n# (3,5),\n ),\n \"cv__tokenizer\" : (\n None,\n ),\n \"cv__lowercase\" : (True,),\n \"cv__min_df\" : (\n# 1,\n 2, \n ),\n \"cv__binary\" : (False,),\n \"cv__stop_words\" : ('english',),\n \"cv__use_idf\" : (True,),\n \"cv__norm\" : (\n 'l1',\n# None,'l2'\n ),\n \n }\n \n lsht_parameters = {\n \"lsht__n_permutations\" : (\n 1,\n# 48, 96, 192, 384, 768, # min\n# 24, 48, 96, 192, 384, #minmax\n# 16, 32, 64, 128, 256, #csa_l\n# 12, 24, 48, 96, 192, #csa\n ),\n \"lsht__selection_function\" : (\n min_hashing,\n# minmax_hashing,\n# minmaxCSALowerBound_hashing,\n# minmaxCSAFullBound_hashing,\n# justCSALowerBound_hashing,\n# justCSAFullBound_hashing,\n ),\n \"lsht__n_jobs\" : (\n# 1,\n# 2,\n -1,\n ) \n }\n \n lshnns_parameters = {\n \"lshnns__n_neighbors\" : (\n# 5,\n 5,10\n# 22,10,\n# 160, 1597, 3991, 7983, 11975, 15966 # PAN11(EN, just queries with relevants) 1%, 10%, 25%, 50%, 75%, 100%\n# 1597, 3991, 7983, 11975 # PAN11(EN, just queries with relevants) 10%, 25%, 50%, 75%\n# ,\n# 15,30\n# 15,22\n ),\n \"lshnns__sort_neighbors\" : (\n False,\n# True,\n ),\n \n }\n \n nns_parameters = {\n \"nns__n_neighbors\" : lshnns_parameters['lshnns__n_neighbors'],\n \"nns__sort_neighbors\" : lshnns_parameters['lshnns__sort_neighbors'],\n }\n\n pbinns_parameters = {\n \"pbinns__n_neighbors\" : nns_parameters['nns__n_neighbors'],\n \"pbinns__sort_neighbors\" : nns_parameters['nns__sort_neighbors'],\n \"pbinns__bucket_count\" : (2,),\n \"pbinns__reference_set_size\" : (40,),\n \"pbinns__prunning_size\" : (10,),\n \"pbinns__ref_sel_threshold\" : (0.1, #0.2, 0.4, 0.5 \n ),\n }\n\n\n '''\n storing parameters dataframes \n '''\n\n parameters_sequence = [('cv',cv_parameters),('lsht',lsht_parameters),('lshnns',lshnns_parameters)]\n parameters_grids_list = generate_or_load_parameters_grids(parameters_sequence,dataset_name)\n\n cv_df_paramaters, lsht_df_paramaters, lshnns_df_paramaters = parameters_grids_list\n\n# for i in parameters_grids_list:\n# print(i) \n# print('xxxxxxxxxxxxxxxxxxxx')\n# exit()\n \n '''\n nearest neighbor search without LSH\n ''' \n parameters_sequence = [('cv',cv_parameters),('nns',nns_parameters)]\n parameters_grids_list = generate_or_load_parameters_grids(parameters_sequence,dataset_name)\n\n nns_df_paramaters = parameters_grids_list[1]\n\n# for i in parameters_grids_list:\n# print(i) \n# print('xxxxxxxxxxxxxxxxxxxx')\n# exit()\n\n '''\n permutation-Based Index(PBI) nearest neighbor search\n ''' \n\n parameters_sequence = [('cv',cv_parameters),('pbinns',pbinns_parameters)]\n parameters_grids_list = generate_or_load_parameters_grids(parameters_sequence,dataset_name)\n\n 
pbinns_df_paramaters = parameters_grids_list[1]\n \n# for i in parameters_grids_list:\n# print(i) \n# print('xxxxxxxxxxxxxxxxxxxx')\n# exit()\n \n\n '''\n dataset extraction\n '''\n if dataset_name == \"psa\":\n corpus_name, (suspicious_info, source_info,target, dataset_encoding) = dataset_name, short_plagiarised_answers_extractor.load_as_ir_task()\n elif dataset_name == \"pan10\":\n corpus_name, (suspicious_info, source_info,target, dataset_encoding) = dataset_name, pan_plagiarism_corpus_2010_extractor.load_as_ir_task(allow_queries_without_relevants=False)\n elif dataset_name == \"pan11\":\n corpus_name, (suspicious_info, source_info,target, dataset_encoding) = dataset_name, pan_plagiarism_corpus_2011_extractor.load_as_ir_task(allow_queries_without_relevants=False, language_filter=\"EN\")\n elif \"pan10\" in dataset_name and \"-samples\" in dataset_name:\n corpus_name, (suspicious_info, source_info,target, dataset_encoding) = dataset_name, pan_plagiarism_corpus_2010_extractor.load_sample_as_ir_task(sample_size, language_filter=\"EN\")\n\n print('queries:',suspicious_info.shape,' Documents:',source_info.shape)\n\n\n documents = Parallel(n_jobs=-1,backend=\"threading\",verbose=1)(delayed(encode_dataframe_content)(si, dataset_encoding) for si in source_info['content'].values)\n queries = Parallel(n_jobs=-1,backend=\"threading\",verbose=1)(delayed(encode_dataframe_content)(si, dataset_encoding) for si in suspicious_info['content'].values)\n \n del suspicious_info, source_info\n \n print(nns_df_paramaters)\n print(lsht_df_paramaters) \n \n# exit()\n \n '''\n using scikit-learn : tokenization\n '''\n\n for i,linei in cv_df_paramaters.iterrows():\n tokenize_by_parameters(documents,queries,target,dataset_name,linei,i,dataset_encoding)\n\n queries_count,documents_count = target.shape\n del documents, queries, target\n \n '''\n transforming bow to lsh representations (min-hasing, minmax-hashing, ...)\n '''\n\n for i,linei in lsht_df_paramaters.iterrows():\n print(linei)\n print('xxxxxx')\n lsh_transform(dataset_name,linei,i,dataset_encoding)\n\n '''\n nearest neighbor search (ranking)\n '''\n\n for i,linei in lshnns_df_paramaters.iterrows():\n print(\"#\"*10+\" LSH N.N.S. \"+\"#\"*10)\n print(linei)\n lsh_nearest_neighbors_search(dataset_name,linei,i,dataset_encoding)\n print(\"-\"*20)\n\n for i,linei in nns_df_paramaters.iterrows():\n print(\"#\"*10+\" N.N.S. \"+\"#\"*10)\n print(linei)\n nearest_neighbors_search(dataset_name,linei,i,dataset_encoding)\n print(\"-\"*20)\n \n for i,linei in pbinns_df_paramaters.iterrows():\n print(\"#\"*10+\" PBI N.N.S. 
\"+\"#\"*10)\n print(linei)\n pbinearest_neighbors_search(dataset_name,linei,i,dataset_encoding)\n print(\"-\"*20)\n \n today = datetime.now()\n today = today.strftime('%Y-%m-%d_%H-%M-%S_')\n \n \n '''\n logging LSH nearest neighbors results on csv\n '''\n a = pd.merge(cv_df_paramaters, lsht_df_paramaters, how='inner', left_index=True, right_on=['input__filename_index',],)\n b = pd.merge(a, lshnns_df_paramaters, how='inner', left_index=True, right_on=['input__filename_index',],suffixes=('_lsht','_lshtnns'))\n del a\n\n for rowi in b.iterrows():\n cv_index = rowi[1]['input__filename_index_lsht']\n lsht_index = rowi[1]['input__filename_index_lshtnns']\n nns_index = rowi[0]\n \n# print(cv_index,'-',lsht_index,'-',nns_index)\n cv_file_path = h5_results_filename(dataset_name, 'cv', cv_index).replace('results','time')\n lsht_file_path = h5_results_filename(dataset_name, 'lsht', lsht_index).replace('results','time')\n lshnns_file_path = h5_results_filename(dataset_name, 'lshnns', nns_index).replace('results','results_evaluation')\n lshnns_time_file_path = h5_results_filename(dataset_name, 'lshnns', nns_index).replace('results','time')\n\n# print('\\t',cv_file_path)\n# print('\\t',lsht_file_path)\n# print('\\t',lshnns_file_path)\n# print('=========')\n approach_precisions = hdf_to_sparse_matrix('precisions', lshnns_file_path)\n approach_recalls = hdf_to_sparse_matrix('recalls', lshnns_file_path)\n average_precision = hdf_to_sparse_matrix('average_precisions', lshnns_file_path).todense()\n \n b.loc[nns_index,'MAP'] = average_precision.mean()\n b.loc[nns_index,'MAP_std'] = average_precision.std()\n b.loc[nns_index,'precision_recall_path'] = lshnns_file_path\n b.loc[nns_index,'recall_mean'] = approach_recalls[:,-1].todense().mean()\n b.loc[nns_index,'recall_std'] = approach_recalls[:,-1].todense().std()\n b.loc[nns_index,'precision_mean'] = approach_precisions[:,-1].todense().mean()\n b.loc[nns_index,'precision_std'] = approach_precisions[:,-1].todense().std()\n \n del approach_precisions, approach_recalls, average_precision\n\n b.loc[nns_index,'documents_count'] = documents_count\n b.loc[nns_index,'queries_count'] = queries_count\n \n with open(cv_file_path.replace('time.h5', 'vocabulary.pkl'),'rb') as f:\n b.loc[nns_index,'vocabulary_size'] = len(pickle.load(f))\n\n q = hdf_to_sparse_matrix('queries', lsht_file_path.replace('time','results'))\n b.loc[nns_index,'lsht_features'] = q.shape[1]\n del q\n \n b.loc[nns_index,'indexing_mean_time'] = 0\n b.loc[nns_index,'querying_mean_time'] = 0\n \n cv_time_dataframe = pd.read_hdf(cv_file_path, 'time_dataframe')\n b.loc[nns_index,'cv_documents_mean_time'] = cv_time_dataframe.loc[0,'documents_mean_time'] \n b.loc[nns_index,'cv_queries_mean_time'] = cv_time_dataframe.loc[0,'queries_mean_time']\n\n b.loc[nns_index,'indexing_mean_time'] += b.loc[nns_index,'cv_documents_mean_time']\n b.loc[nns_index,'querying_mean_time'] += b.loc[nns_index,'cv_queries_mean_time']\n del cv_time_dataframe \n\n d_time = hdf_to_sparse_matrix('documents_time',lsht_file_path)\n q_time = hdf_to_sparse_matrix('queries_time',lsht_file_path)\n \n b.loc[nns_index,'lsht_documents_mean_time'] = d_time.sum(axis=1).mean() \n b.loc[nns_index,'lsht_queries_mean_time'] = q_time.sum(axis=1).mean()\n\n b.loc[nns_index,'indexing_mean_time'] += b.loc[nns_index,'lsht_documents_mean_time']\n b.loc[nns_index,'querying_mean_time'] += b.loc[nns_index,'lsht_queries_mean_time']\n del d_time, q_time \n\n nns_time_dataframe = pd.read_hdf(lshnns_time_file_path, 'time_dataframe')\n 
b.loc[nns_index,'nns_documents_mean_time'] = nns_time_dataframe.loc[0,'documents_mean_time'] \n b.loc[nns_index,'nns_queries_mean_time'] = nns_time_dataframe.loc[0,'queries_mean_time']\n\n b.loc[nns_index,'indexing_mean_time'] += b.loc[nns_index,'nns_documents_mean_time']\n b.loc[nns_index,'querying_mean_time'] += b.loc[nns_index,'nns_queries_mean_time']\n del nns_time_dataframe\n\n print(b.loc[nns_index,'lsht__selection_function'],' : ',int(b.loc[nns_index,'lsht_features']),' features x ',b.loc[nns_index,'lsht__n_permutations'],' permutation')\n print(\"MAP = %4.2f[+-%4.2f]\"%(b.loc[nns_index,'MAP'],b.loc[nns_index,'MAP_std']))\n print(\"recall = %4.2f[+-%4.2f]\"%(b.loc[nns_index,'recall_mean'],b.loc[nns_index,'recall_std']))\n print(\"index time = %4.4f\"%(b.loc[nns_index,'cv_documents_mean_time']+b.loc[nns_index,'lsht_documents_mean_time']+b.loc[nns_index,'nns_documents_mean_time']))\n print(\"query time = %4.4f\"%(b.loc[nns_index,'cv_queries_mean_time']+b.loc[nns_index,'lsht_queries_mean_time']+b.loc[nns_index,'nns_queries_mean_time']))\n \n print(\"---->\",b.loc[nns_index,'indexing_mean_time'])\n b.to_csv('%s%s_lsh_results.csv'%(today,dataset_name),sep='\\t')\n \n del b\n \n \n '''\n logging nearest neighbors (without LSH) results on csv\n '''\n b = pd.merge(cv_df_paramaters, nns_df_paramaters, how='inner', left_index=True, right_on=['input__filename_index',],)\n \n for rowi in b.iterrows():\n cv_index = rowi[1]['input__filename_index']\n nns_index = rowi[0]\n \n print(cv_index,'-',lsht_index,'-',nns_index)\n cv_file_path = h5_results_filename(dataset_name, 'cv', cv_index).replace('results','time')\n nns_file_path = h5_results_filename(dataset_name, 'nns', nns_index).replace('results','results_evaluation')\n nns_time_file_path = h5_results_filename(dataset_name, 'nns', nns_index).replace('results','time')\n \n# print('\\t',cv_file_path)\n# print('\\t',lsht_file_path)\n# print('\\t',lshnns_file_path)\n# print('=========')\n approach_precisions = hdf_to_sparse_matrix('precisions', nns_file_path)\n approach_recalls = hdf_to_sparse_matrix('recalls', nns_file_path)\n average_precision = hdf_to_sparse_matrix('average_precisions', nns_file_path).todense()\n \n b.loc[nns_index,'MAP'] = average_precision.mean()\n b.loc[nns_index,'MAP_std'] = average_precision.std()\n b.loc[nns_index,'recall_mean'] = approach_recalls[:,-1].todense().mean()\n b.loc[nns_index,'recall_std'] = approach_recalls[:,-1].todense().std()\n b.loc[nns_index,'precision_mean'] = approach_precisions[:,-1].todense().mean()\n b.loc[nns_index,'precision_std'] = approach_precisions[:,-1].todense().std()\n \n del approach_precisions, approach_recalls, average_precision\n \n b.loc[nns_index,'documents_count'] = documents_count\n b.loc[nns_index,'queries_count'] = queries_count\n \n with open(cv_file_path.replace('time.h5', 'vocabulary.pkl'),'rb') as f:\n b.loc[nns_index,'vocabulary_size'] = len(pickle.load(f))\n \n b.loc[nns_index,'indexing_mean_time'] = 0\n b.loc[nns_index,'querying_mean_time'] = 0\n \n cv_time_dataframe = pd.read_hdf(cv_file_path, 'time_dataframe')\n b.loc[nns_index,'cv_documents_mean_time'] = cv_time_dataframe.loc[0,'documents_mean_time'] \n b.loc[nns_index,'cv_queries_mean_time'] = cv_time_dataframe.loc[0,'queries_mean_time']\n \n b.loc[nns_index,'indexing_mean_time'] += b.loc[nns_index,'cv_documents_mean_time']\n b.loc[nns_index,'querying_mean_time'] += b.loc[nns_index,'cv_queries_mean_time']\n del cv_time_dataframe \n \n nns_time_dataframe = pd.read_hdf(nns_time_file_path, 'time_dataframe')\n 
b.loc[nns_index,'nns_documents_mean_time'] = nns_time_dataframe.loc[0,'documents_mean_time'] \n        b.loc[nns_index,'nns_queries_mean_time'] = nns_time_dataframe.loc[0,'queries_mean_time']\n        \n        b.loc[nns_index,'indexing_mean_time'] += b.loc[nns_index,'nns_documents_mean_time']\n        b.loc[nns_index,'querying_mean_time'] += b.loc[nns_index,'nns_queries_mean_time']\n        del nns_time_dataframe \n        \n        print('nns:')\n        print(\"MAP = %4.2f[+-%4.2f]\"%(b.loc[nns_index,'MAP'],b.loc[nns_index,'MAP_std']))\n        print(\"recall = %4.2f[+-%4.2f]\"%(b.loc[nns_index,'recall_mean'],b.loc[nns_index,'recall_std']))\n        print(\"index time = %4.4f\"%(b.loc[nns_index,'cv_documents_mean_time']+b.loc[nns_index,'nns_documents_mean_time']))\n        print(\"query time = %4.4f\"%(b.loc[nns_index,'cv_queries_mean_time']+b.loc[nns_index,'nns_queries_mean_time']))\n    \n    b.to_csv('%s%s_results.csv'%(today,dataset_name),sep='\\t') \n    \n    \n    '''\n    Permutation-Based Index (PBI) logging nearest neighbors results on csv\n    '''\n    b = pd.merge(cv_df_paramaters, pbinns_df_paramaters, how='inner', left_index=True, right_on=['input__filename_index',],)\n    \n    for rowi in b.iterrows():\n        cv_index = rowi[1]['input__filename_index']\n        pbinns_index = rowi[0]\n        \n#        print(cv_index,'-',lsht_index,'-',nns_index)\n        cv_file_path = h5_results_filename(dataset_name, 'cv', cv_index).replace('results','time')\n        pbinns_file_path = h5_results_filename(dataset_name, 'pbinns', pbinns_index).replace('results','results_evaluation')\n        pbinns_time_file_path = h5_results_filename(dataset_name, 'pbinns', pbinns_index).replace('results','time')\n        \n#        print('\\t',cv_file_path)\n#        print('\\t',lsht_file_path)\n#        print('\\t',lshnns_file_path)\n#        print('=========')\n        approach_precisions = hdf_to_sparse_matrix('precisions', pbinns_file_path)\n        approach_recalls = hdf_to_sparse_matrix('recalls', pbinns_file_path)\n        average_precision = hdf_to_sparse_matrix('average_precisions', pbinns_file_path).todense()\n        \n        b.loc[pbinns_index,'MAP'] = average_precision.mean()\n        b.loc[pbinns_index,'MAP_std'] = average_precision.std()\n        b.loc[pbinns_index,'recall_mean'] = approach_recalls[:,-1].todense().mean()\n        b.loc[pbinns_index,'recall_std'] = approach_recalls[:,-1].todense().std()\n        b.loc[pbinns_index,'precision_mean'] = approach_precisions[:,-1].todense().mean()\n        b.loc[pbinns_index,'precision_std'] = approach_precisions[:,-1].todense().std()\n        \n        del approach_precisions, approach_recalls, average_precision\n        \n        b.loc[pbinns_index,'documents_count'] = documents_count\n        b.loc[pbinns_index,'queries_count'] = queries_count\n        \n        with open(cv_file_path.replace('time.h5', 'vocabulary.pkl'),'rb') as f:\n            b.loc[pbinns_index,'vocabulary_size'] = len(pickle.load(f))\n        \n        b.loc[pbinns_index,'indexing_mean_time'] = 0\n        b.loc[pbinns_index,'querying_mean_time'] = 0\n        \n        cv_time_dataframe = pd.read_hdf(cv_file_path, 'time_dataframe')\n        b.loc[pbinns_index,'cv_documents_mean_time'] = cv_time_dataframe.loc[0,'documents_mean_time'] \n        b.loc[pbinns_index,'cv_queries_mean_time'] = cv_time_dataframe.loc[0,'queries_mean_time']\n        \n        b.loc[pbinns_index,'indexing_mean_time'] += b.loc[pbinns_index,'cv_documents_mean_time'] \n        b.loc[pbinns_index,'querying_mean_time'] += b.loc[pbinns_index,'cv_queries_mean_time']\n        del cv_time_dataframe \n        \n        pbinns_time_dataframe = pd.read_hdf(pbinns_time_file_path, 'time_dataframe')\n        b.loc[pbinns_index,'pbinns_documents_mean_time'] = pbinns_time_dataframe.loc[0,'documents_mean_time'] \n        b.loc[pbinns_index,'pbinns_queries_mean_time'] = pbinns_time_dataframe.loc[0,'queries_mean_time']\n        \n        
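# Fold the PBI-stage times into the running pipeline totals for this row.\n        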
b.loc[pbinns_index,'indexing_mean_time'] += b.loc[pbinns_index,'pbinns_documents_mean_time'] \n        b.loc[pbinns_index,'querying_mean_time'] += b.loc[pbinns_index,'pbinns_queries_mean_time']\n        del pbinns_time_dataframe \n        \n        print('pbinns:')\n        print(\"MAP = %4.2f[+-%4.2f]\"%(b.loc[pbinns_index,'MAP'],b.loc[pbinns_index,'MAP_std']))\n        print(\"recall = %4.2f[+-%4.2f]\"%(b.loc[pbinns_index,'recall_mean'],b.loc[pbinns_index,'recall_std']))\n        print(\"index time = %4.4f\"%(b.loc[pbinns_index,'cv_documents_mean_time']+b.loc[pbinns_index,'pbinns_documents_mean_time']))\n        print(\"query time = %4.4f\"%(b.loc[pbinns_index,'cv_queries_mean_time']+b.loc[pbinns_index,'pbinns_queries_mean_time']))\n    \n    b.to_csv('%s%s_pbi_results.csv'%(today,dataset_name),sep='\\t') ","repo_name":"duartefellipe/minmaxcsa","sub_path":"src/plagiarism_detection/extermal_plagiarism/heuristic_retrieval.py","file_name":"heuristic_retrieval.py","file_ext":"py","file_size_in_byte":35394,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"5979856386","text":"from ability import Ability\nfrom weapon import Weapon\nfrom armor import Armor\nfrom hero import Hero\nfrom team import Team\n\nclass Arena:\n    def __init__(self):\n        '''\n        Instantiate properties\n        team_one: None\n        team_two: None\n        '''\n        self.team_one = Team(\"Team One\")\n        self.team_two = Team(\"Team Two\")\n\n    def create_ability(self):\n        '''\n        Prompt for ability information.\n        return ability with values from user input\n        '''\n        ability_name = input(\"What is the ability name? > \")\n        max_damage = int(input(\"What is the max damage of the ability? > \"))\n        return Ability(ability_name, max_damage)\n\n    def create_weapon(self):\n        '''\n        Prompt for weapon information.\n        return weapon with values from user input\n        '''\n        weapon_name = input(\"What is the weapon name? > \")\n        max_damage = int(input(\"What is the max damage of the weapon? > \"))\n        return Weapon(weapon_name, max_damage)\n\n    def create_armor(self):\n        '''\n        Prompt for armor information.\n        return armor with values from user input\n        '''\n        armor_name = input(\"What is the armor name? > \")\n        max_damage = int(input(\"What is the max block value of the armor? > \"))\n        return Armor(armor_name, max_damage)\n\n    def create_hero(self):\n        '''\n        Prompt for hero information.\n        return hero with values from user input\n        '''\n        hero_name = input(\"Hero's name > \")\n        hero = Hero(hero_name)\n        add_item = None\n        while add_item != \"4\":\n            add_item = input(\"[1] Add ability\\n[2] Add weapon\\n[3] Add armor\\n[4] Done adding items\\n\\nYour choice > \")\n            if add_item == \"1\":\n                new_ability = self.create_ability()\n                hero.add_ability(new_ability)\n            elif add_item == \"2\":\n                new_weapon = self.create_weapon()\n                hero.add_ability(new_weapon)\n            elif add_item == \"3\":\n                new_armor = self.create_armor()\n                hero.add_armor(new_armor)\n        return hero\n\n    def build_team_one(self):\n        '''\n        Prompt the user to build team_one\n        '''\n        num_of_team_members = int(input(\"How many members would you like on Team One? > \"))\n        for i in range(num_of_team_members):\n            hero = self.create_hero()\n            self.team_one.add_hero(hero)\n\n    def build_team_two(self):\n        '''\n        Prompt the user to build team_two\n        '''\n        num_of_team_members = int(input(\"How many members would you like on Team Two? 
> \"))\n for i in range(num_of_team_members):\n hero = self.create_hero()\n self.team_two.add_hero(hero)\n\n def team_battle(self):\n '''\n Battle team_one and team_two together\n '''\n self.team_one.attack(self.team_two)\n\n def show_stats(self):\n '''\n Prints team statistics to the terminal\n '''\n print()\n print(self.team_one.team_name + \" statistics: \")\n self.team_one.stats()\n print()\n print(self.team_two.team_name + \" statistics: \")\n self.team_two.stats()\n print()\n\n team_kills = 0\n team_deaths = 0\n team_survivors = list()\n for hero in self.team_one.heroes:\n team_kills += hero.kills\n team_deaths += hero.deaths\n if hero.is_alive():\n team_survivors.append(hero.hero_name)\n if team_deaths == 0: \n team_deaths = 1\n print(self.team_one.team_name + \" average K/D ratio was: \" + str(team_kills/team_deaths))\n if len(team_survivors) > 0:\n print(f\"The surviving members of {arena.team_one.team_name} are: {team_survivors}\")\n\n\n team_kills = 0\n team_deaths = 0\n team_survivors = list()\n for hero in self.team_two.heroes:\n team_kills += hero.kills\n team_deaths += hero.deaths\n if hero.is_alive():\n team_survivors.append(hero.hero_name) \n if team_deaths == 0:\n team_deaths = 1\n print(self.team_two.team_name + \" average K/D ratio was: \" + str(team_kills/team_deaths))\n if len(team_survivors) > 0:\n print(f\"The surviving members of {arena.team_two.team_name} are: {team_survivors}\")\n\nif __name__ == \"__main__\":\n game_is_running = True\n\n # Instantiate Game Arena\n arena = Arena()\n\n #Build Teams\n arena.build_team_one()\n arena.build_team_two()\n\n while game_is_running:\n\n arena.team_battle()\n arena.show_stats()\n play_again = input(\"Play Again? Y or N: \")\n\n #Check for Player Input\n if play_again.lower() == \"n\":\n game_is_running = False\n\n else:\n #Revive heroes to play again\n arena.team_one.revive_heroes()\n arena.team_two.revive_heroes()\n\n\n","repo_name":"energeist/acs-1111-superhero-dueler","sub_path":"arena.py","file_name":"arena.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23970737786","text":"from fractions import Fraction as F\n\n\ndef main():\n num_result = 1\n den_result = 1\n\n for num in range(10, 99):\n for den in range(num + 1, 100):\n LS = len(set(str(num) + str(den)))\n if LS != 3 or num % 10 == den % 10 == 0:\n continue\n\n frac = F(num, den)\n\n try:\n if str(num)[1] == str(den)[0]:\n sw_frac = F(int(str(num)[0]), int(str(den)[1]))\n\n else:\n sw_frac = F(int(str(num)[1]), int(str(den)[0]))\n\n if sw_frac == frac:\n num_result *= frac.numerator\n den_result *= frac.denominator\n\n except ZeroDivisionError:\n continue\n\n result = F(num_result, den_result).denominator\n print(f\"Answer is {result}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"micahyoung324/Project-Euler","sub_path":"0001-0100/0033/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"16800771492","text":"# Write a UDF in python, it will take two arguments list(sequence of elements)\n# and its size.\tFunction arrange and display elements in ascending order.\n# Using selection sort.\n\ndef insertion_sort(list1):\n n = len(list1)\n for i in range(1,n):\n temp = list1[i]\n j = i-1\n while j>=0 and temp 20):\n break;\n\nfor counter, item in enumerate(open(settings.dict['FILES_PATH'] + os.sep + settings.dict['year'] + 
os.sep + settings.dict['inputTerm1'] + \"-sortedByCosine.txt\")):\n itemSplit_end = item.split(\"\\t\")\n termName_end = itemSplit_end[0].lower()\n endInputTermNearestNeighbor.append(termName_end)\n if (counter > 20):\n break;\n\nprint(\"startInputTermNearestNeighbor: \", len(startInputTermNearestNeighbor))\nprint(\"endInputTermNearestNeighbor: \", len(endInputTermNearestNeighbor))\n\npossibleBTerms = []\nfor counter, item in enumerate(open(settings.dict['FILES_PATH']+os.sep+str(int(settings.dict['year'])-1)+os.sep+\"final-sortedBTermsByCosine.txt\")):\n split_item = item.split(\"\\t\")\n possibleBTerms.append(split_item[0].lower())\n\nprint(\"Possible B Terms: \", len(possibleBTerms))\n\nglobaltransformationMatrixObj = globaltransformationMatrix(settings.dict['yearBase'], settings.dict['yearTarget'], settings.dict['totalFrequentPairs'])\nglobaltransformationMatrixObj.generateTransformationMatrix()\n# bTermsMatrix = np.empty([len(possibleBTerms), 300])\n# for counter1, item1 in enumerate(possibleBTerms):\n# # print(\"B term:\",item)\n# path = pathlib.Path(settings.dict['ROOT_PATH'] + os.sep + \"invertedIndex\" + os.sep + str(int(settings.dict['year'])-1) + os.sep + item1.lower())\n#\n# if path.exists():\n# embedding1 = nearestNeighbour.getEmbeddings(item1)\n# # print(embedding1)\n# # learnTransformationMatrix_M = globaltransformationMatrix.globalTransformationMatrix()\n# bTermsMatrix[counter1] = embedding1\n#\n# print(\"BTermsMatrix: \", bTermsMatrix.shape)\n","repo_name":"kishlayjha/Concepts-Bridges","sub_path":"transformation_main.py","file_name":"transformation_main.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"13139385591","text":"import os\nimport re\nimport string\nimport nltk\nfrom nltk.stem.porter import PorterStemmer\n# nltk.download('punkt') # Uncomment this the first time you run the program\n\ndef removeNumbers(words):\n pattern = '[0-9]'\n words = [re.sub(pattern, '', i) for i in words]\n return words\n\ndef punctuationAndLower(words):\n words = words.translate(str.maketrans('', '', string.punctuation))\n words = words.lower()\n return words\n\ndef stemming(words):\n porter_stemmer = PorterStemmer()\n\n for i in range(len(words)):\n words[i] = porter_stemmer.stem(words[i])\n\n return words\n\ndef stopwordRemoval(words):\n with open(\"files/stoplist.txt\", mode=\"r\", encoding=\"utf-8\") as file:\n stopwordList = set()\n for line in file:\n stopwordList.add(line.strip())\n\n for word in reversed(words):\n if (word in stopwordList):\n words.remove(word)\n \n return words\n\ndef createIndex(indexNum):\n try:\n os.mkdir(indexNum)\n except FileExistsError:\n pass\n \n wordsList = list()\n word2ID_Dict = {} # 1\n text2ID_Dict = {} # 2\n revertedFile_Dict = {} # 3\n\n fileID = 0\n a = 0 # Testing purposes\n\n for filename in os.listdir(\"CACM\"):\n fileID += 1\n a += 1 # Testing purposes\n\n with open(\"CACM/\" + filename, mode=\"r\", encoding=\"utf-8\") as file:\n words = file.read()\n words = punctuationAndLower(words)\n words = words.split()\n words = removeNumbers(words)\n words = list(filter(None, words))\n\n words.remove('html')\n words.remove('pre')\n words.remove('pre')\n words.remove('html')\n\n if(indexNum == \"Index-1\"):\n pass\n elif(indexNum == \"Index-2\"):\n words = stemming(words)\n elif(indexNum == \"Index-3\"):\n words = stopwordRemoval(words)\n elif(indexNum == \"Index-4\"):\n words = stopwordRemoval(words)\n words = stemming(words)\n\n '''creating the 
text2ID dictionary'''\n text2ID_Dict[filename] = [fileID, len(words)]\n\n '''creating the revertedFile dictionary'''\n for word in words:\n if (word in revertedFile_Dict) and (revertedFile_Dict[word][0][0] != fileID):\n revertedFile_Dict[word].append([fileID, 1])\n elif (word in revertedFile_Dict) and (revertedFile_Dict[word][0][0] == fileID):\n revertedFile_Dict[word][0][1] += 1\n else:\n revertedFile_Dict[word] = [[fileID, 1]]\n\n # Testing purposes\n # print(sorted(words))\n # if a == 2:\n # print(sorted(words))\n # break\n\n wordsList += words\n\n wordsList = list(sorted(set(wordsList)))\n\n index = 0\n line = 0\n\n '''creating the word2ID dictionary'''\n for word in wordsList:\n index += 1\n frequency = 0\n\n word2ID_Dict[word] = [index, 0, []]\n\n for i in revertedFile_Dict[word]:\n line += 1\n frequency += i[1]\n\n word2ID_Dict[word][2].append(line)\n\n word2ID_Dict[word][1] = frequency\n\n # Testing purposes\n # print(wordsList)\n # print(word2ID_Dict['a.'])\n # print(wordsList)\n # print(text2ID_Dict)\n # print(len(wordsList))\n\n with open(indexNum + \"/text2ID.txt\", mode=\"w\", encoding=\"utf-8\") as file:\n file.write(\"textName textID textLength\\n\")\n\n textNamesList = sorted(list(text2ID_Dict.keys()))\n\n for textName in textNamesList:\n file.write(textName + \" \" + str(text2ID_Dict[textName][0]) + \" \" + str(text2ID_Dict[textName][1]) + \"\\n\")\n\n with open(indexNum + \"/word2ID.txt\", mode=\"w\", encoding=\"utf-8\") as file:\n file.write(\"word wordID frequency line_appearences\\n\")\n\n # wordsList = sorted(list(text2ID_Dict.keys()))\n\n for word in wordsList:\n file.write(word + \" \" + str(word2ID_Dict[word][0]) + \" \" + str(word2ID_Dict[word][1]) + \" \")\n for line in word2ID_Dict[word][2]:\n file.write(str(line) + \" \")\n file.write(\"\\n\")\n\n with open(indexNum + \"/revertedFile.txt\", mode=\"w\", encoding=\"utf-8\") as file:\n file.write(\"wordID textID appearances\\n\")\n\n # wordsList = sorted(list(revertedFile_Dict.keys()))\n\n for word in wordsList:\n for value in revertedFile_Dict[word]:\n file.write(str(word2ID_Dict[word][0]) + \" \" + str(value[0]) + \" \" + str(value[1]) + \"\\n\")\n\n\ncreateIndex(\"Index-1\")\ncreateIndex(\"Index-2\")\ncreateIndex(\"Index-3\")\ncreateIndex(\"Index-4\")","repo_name":"SoursosK/InvertedFiles-Indexing","sub_path":"createIndex.py","file_name":"createIndex.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31263007595","text":"import tkinter as tk\r\n\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Encryption and Decryption\")\r\nroot.configure(bg=\"#F8F8F8\")\r\n\r\n# 1st Text area\r\nplaintext_label = tk.Label(root, text=\"Decrypted:\", font=(\"Arial\", 14), bg=\"#F8F8F8\")\r\nplaintext_label.grid(row=0, column=0, padx=5, pady=5)\r\nplaintext_entry = tk.Text(root, height=5, width=30, font=(\"Arial\", 12))\r\nplaintext_entry.grid(row=1, column=0, padx=5, pady=5)\r\n\r\n# 2nd text area\r\nciphertext_label = tk.Label(root, text=\"Encrypted:\", font=(\"Arial\", 14), bg=\"#F8F8F8\")\r\nciphertext_label.grid(row=0, column=1, padx=5, pady=5)\r\nciphertext_entry = tk.Text(root, height=5, width=30, font=(\"Arial\", 12))\r\nciphertext_entry.grid(row=1, column=1, padx=5, pady=5)\r\n\r\n\r\nm = 26 #The number of letters in desired language (English is chosen)\r\nk = 3 #The key (can be changed as desired)\r\n\r\n#The encryption process\r\ndef encrypt():\r\n plaintext = plaintext_entry.get(\"1.0\", \"end-1c\") #Acquires plaintext from 
the 1st text area\r\n    ciphertext = \"\"\r\n    for char in plaintext:\r\n        if char.isalpha():\r\n            p = ord(char.lower()) - ord('a')\r\n            c = (p + k) % m\r\n            ciphertext += chr(c + ord('a'))\r\n        else:\r\n            ciphertext += char\r\n    ciphertext_entry.delete(\"1.0\", \"end\") # Clear the second text area\r\n    ciphertext_entry.insert(\"1.0\", ciphertext) # Display the ciphertext in the second text area\r\n\r\nencrypt_button = tk.Button(root, text=\"Encrypt\", command=encrypt, font=(\"Arial\", 12), bg=\"#4CAF50\", fg=\"#FFFFFF\", padx=10, pady=5)\r\nencrypt_button.grid(row=2, column=0, padx=5, pady=10)\r\n#Decryption\r\n\r\ndef decrypt():\r\n    ciphertext = ciphertext_entry.get(\"1.0\", \"end-1c\") #Acquires ciphertext from the 2nd text area\r\n    plaintext = \"\"\r\n    for char in ciphertext:\r\n        if char.isalpha():\r\n            c = ord(char.lower()) - ord('a')\r\n            p = (c - k) % m\r\n            plaintext += chr(p + ord('a'))\r\n        else:\r\n            plaintext += char\r\n    plaintext_entry.delete(\"1.0\", \"end\") # Clear the first text area\r\n    plaintext_entry.insert(\"1.0\", plaintext) # Display the plaintext in the first text area\r\n\r\ndecrypt_button = tk.Button(root, text=\"Decrypt\", command=decrypt, font=(\"Arial\", 12), bg=\"#F44336\", fg=\"#FFFFFF\", padx=10, pady=5)\r\ndecrypt_button.grid(row=2, column=1, padx=5, pady=10)\r\n\r\n#Text area labels\r\nplaintext_label.config(text=\"Decrypted\")\r\nciphertext_label.config(text=\"Encrypted\")\r\n\r\n#padding\r\nfor i in range(3):\r\n    root.grid_rowconfigure(i, weight=1)\r\n    root.grid_columnconfigure(i, weight=1)\r\n\r\n# Start the GUI\r\nroot.mainloop()","repo_name":"seagle011/simple-encryption-decryption","sub_path":"Simple encryption and decryption.py","file_name":"Simple encryption and decryption.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"41345977766","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\ndef qiguais(a,b):\n    cont=0\n    for i in range(0, len(a),1):\n        for j in range(0, len(b),1):\n            if a[i]==b[j]:\n                cont=cont+1\n    return cont\nn=input('number of elements in list 1: ')\nm=input('number of elements in list 2: ')\na=[]\nb=[]\nfor i in range(0, n,1):\n    a.append(input('element of a: '))\nfor j in range(0, m,1):\n    b.append(input('element of b: '))\nprint(qiguais(a,b))","repo_name":"rafaelperazzo/programacao-web","sub_path":"moodledata/vpl_data/50/usersdata/93/17711/submittedfiles/contido.py","file_name":"contido.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15471088353","text":"class Solution(object):\n    def minSubArrayLen(self, target, nums):\n        \"\"\"\n        :type target: int\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        result = float('inf') # length result to return\n        left = 0 # left pointer\n        sum = 0 # running sum\n        \n        for index in range(0, len(nums)): # iterate over every array index\n            sum = sum + nums[index] # add the value at index to sum\n            while sum>= target: # while sum is greater than or equal to target\n                result = min(result, index + 1 - left)\n                sum = sum - nums[left]\n                left = left + 1\n        \n        if result != float('inf'):\n            return result\n        else:\n            return 0","repo_name":"serberoos/algorithm","sub_path":"leetcode/209 minimum sizesub array sum.py","file_name":"209 minimum sizesub array sum.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18223100919","text":"from django.conf import settings\nfrom 
django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\n\napp_name = \"item\"\nurlpatterns = [\n\n path('', views.list_item, name=\"list_item\"),\n path('detail/item/', views.detail_item, name=\"detail_item\"),\n path('create/',views.create_item, name = 'create_item'),\n path('register/',views.register_customer, name=\"register_customer\"),\n path('detail/customer/',views.detail_customer, name='detail_customer'),\n path('list/customer',views.list_customer, name='list_customer'),\n path('edit/item/', views.edit_item,name=\"edit_item\"),\n path('edit/customer/', views.edit_customer,name=\"edit_customer\"),\n path('delete/customer/',views.delete_customer, name='delete_customer'),\n path('delete/item/',views.delete_item, name='delete_item'),\n path('vote/add/',views.vote_add, name=\"vote_add\"),\n path('vote/minus/',views.vote_minus, name=\"vote_minus\")\n ]\n","repo_name":"rkdurd34/piro12_inventory_management","sub_path":"item/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"40604218337","text":"from collections import deque\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n\n :type root: TreeNode\n :rtype: str\n \"\"\"\n data = []\n\n def dfs(node: TreeNode) -> None:\n if node is None:\n data.append(\"None\")\n return\n data.append(str(node.val))\n dfs(node.left)\n dfs(node.right)\n\n dfs(root)\n return \",\".join(data)\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n\n :type data: str\n :rtype: TreeNode\n \"\"\"\n data = deque(data.split(\",\"))\n if data[0] == \"None\":\n return None\n\n def dfs(node: TreeNode) -> None:\n node.val = int(data.popleft())\n\n if data[0] == \"None\":\n data.popleft()\n else:\n node.left = TreeNode(0)\n dfs(node.left)\n\n if data[0] == \"None\":\n data.popleft()\n else:\n node.right = TreeNode(0)\n dfs(node.right)\n\n root = TreeNode(0)\n dfs(root)\n return root\n\n\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# ans = deser.deserialize(ser.serialize(root))\n","repo_name":"endruz/LeetCode","sub_path":"src/0297.serialize-and-deserialize-binary-tree/0297.serialize-and-deserialize-binary-tree.py","file_name":"0297.serialize-and-deserialize-binary-tree.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"32827199558","text":"import re\nfrom contextlib import closing\n\nimport discord\nimport psycopg2\n\nfrom core import EventListener, Plugin, Server, Side, Status, utils, event\nfrom typing import Union, cast\nfrom plugins.creditsystem.player import CreditPlayer\n\n\nclass SlotBlockingListener(EventListener):\n def __init__(self, plugin: Plugin):\n super().__init__(plugin)\n\n def load_params_into_mission(self, server: Server):\n config: dict = self.plugin.get_config(server, use_cache=False)\n if config:\n server.sendtoDCS({\n 'command': 'loadParams',\n 'plugin': self.plugin_name,\n 'params': config\n })\n guild = self.bot.guilds[0]\n roles = [\n discord.utils.get(guild.roles, name=x)\n for x in config.get('VIP', {}).get('discord', [])\n ]\n if not roles:\n return\n # get all linked members\n 
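# (rows with discord_id == -1 are unlinked accounts, so the query below only\n            # returns players whose Discord roles can be matched against the VIP list)\n            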
conn = self.pool.getconn()\n try:\n with closing(conn.cursor()) as cursor:\n cursor.execute('SELECT ucid, discord_id FROM players WHERE discord_id != -1')\n for row in cursor.fetchall():\n member = guild.get_member(row[1])\n if not member:\n continue\n for role in member.roles:\n if role in roles:\n server.sendtoDCS({\n 'command': 'uploadUserRoles',\n 'ucid': row[0],\n 'roles': [x.name for x in member.roles]\n })\n break\n except (Exception, psycopg2.DatabaseError) as error:\n self.log.exception(error)\n finally:\n self.pool.putconn(conn)\n\n @event(name=\"registerDCSServer\")\n async def registerDCSServer(self, server: Server, data: dict) -> None:\n # the server is running already\n if data['channel'].startswith('sync-'):\n self.load_params_into_mission(server)\n\n @event(name=\"onMissionLoadEnd\")\n async def onMissionLoadEnd(self, server: Server, data: dict) -> None:\n self.load_params_into_mission(server)\n\n def _get_points(self, server: Server, player: CreditPlayer) -> int:\n config = self.plugin.get_config(server)\n if 'restricted' in config:\n for unit in config['restricted']:\n if ('unit_type' in unit and unit['unit_type'] == player.unit_type) or \\\n ('unit_name' in unit and unit['unit_name'] in player.unit_name) or \\\n ('group_name' in unit and unit['group_name'] in player.group_name):\n if player.sub_slot == 0 and 'points' in unit:\n return unit['points']\n elif player.sub_slot > 0 and 'crew' in unit:\n return unit['crew']\n return 0\n\n def _get_costs(self, server: Server, data: Union[CreditPlayer, dict]) -> int:\n config = self.plugin.get_config(server)\n unit_type = data.unit_type if isinstance(data, CreditPlayer) else data['unit_type']\n unit_name = data.unit_name if isinstance(data, CreditPlayer) else data['unit_name']\n group_name = data.group_name if isinstance(data, CreditPlayer) else data['group_name']\n if 'restricted' in config:\n for unit in config['restricted']:\n if ('unit_type' in unit and re.match(unit['unit_type'], unit_type)) or \\\n ('unit_name' in unit and re.match(unit['unit_name'], unit_name)) or \\\n ('group_name' in unit and re.match(unit['group_name'], group_name)):\n if 'costs' in unit:\n return unit['costs']\n return 0\n\n def _is_vip(self, config: dict, data: dict) -> bool:\n if 'VIP' not in config:\n return False\n if 'ucid' in config['VIP']:\n ucid = config['VIP']['ucid']\n if (isinstance(ucid, str) and ucid == data['ucid']) or (isinstance(ucid, list) and data['ucid'] in ucid):\n return True\n if 'discord' in config['VIP']:\n member = self.bot.get_member_by_ucid(data['ucid'])\n return utils.check_roles(config['VIP']['discord'], member) if member else False\n return False\n\n @event(name=\"onPlayerConnect\")\n async def onPlayerConnect(self, server: Server, data: dict) -> None:\n config = self.plugin.get_config(server)\n if not config or data['id'] == 1:\n return\n if self._is_vip(config, data) and 'audit' in config['VIP'] and config['VIP']['audit']:\n member = self.bot.get_member_by_ucid(data['ucid'])\n if member:\n message = \"VIP member {} joined\".format(utils.escape_string(member.display_name))\n else:\n message = \"VIP user {}(ucid={} joined\".format(utils.escape_string(data['name']), data['ucid'])\n await self.bot.audit(message, server=server)\n\n @event(name=\"onPlayerChangeSlot\")\n async def onPlayerChangeSlot(self, server: Server, data: dict) -> None:\n config = self.plugin.get_config(server)\n if not config:\n return\n if 'side' in data and 'use_reservations' in config and config['use_reservations']:\n player: CreditPlayer = 
cast(CreditPlayer, server.get_player(ucid=data['ucid'], active=True))\n if player and player.deposit > 0:\n old_points = player.points\n player.points -= player.deposit\n player.audit('buy', old_points, 'Points taken for using a reserved module')\n player.deposit = 0\n # if mission statistics are enabled, use BIRTH events instead\n if player and not self.bot.config.getboolean(server.installation, 'MISSION_STATISTICS') and \\\n Side(data['side']) != Side.SPECTATOR:\n # only pilots have to \"pay\" for their plane\n if int(data['sub_slot']) == 0:\n player.deposit = self._get_costs(server, data)\n\n @event(name=\"onMissionEvent\")\n async def onMissionEvent(self, server: Server, data: dict) -> None:\n config = self.plugin.get_config(server)\n if not config:\n return\n if data['eventName'] == 'S_EVENT_BIRTH':\n initiator = data['initiator']\n # check, if they are a human player\n if 'name' not in initiator:\n return\n if 'use_reservations' in config and config['use_reservations']:\n player: CreditPlayer = cast(CreditPlayer, server.get_player(name=initiator['name'], active=True))\n # only pilots have to \"pay\" for their plane\n if player and player.sub_slot == 0:\n player.deposit = self._get_costs(server, player)\n\n @event(name=\"onGameEvent\")\n async def onGameEvent(self, server: Server, data: dict) -> None:\n config = self.plugin.get_config(server)\n if not config or 'restricted' not in config or server.status != Status.RUNNING:\n return\n if data['eventName'] == 'kill':\n # players only lose points if they weren't killed as a teamkill\n if data['arg4'] != -1 and data['arg3'] != data['arg6']:\n player: CreditPlayer = cast(CreditPlayer, server.get_player(id=data['arg4']))\n if player and 'use_reservations' in config and config['use_reservations']:\n if player.deposit > 0:\n old_points = player.points\n player.points -= player.deposit\n player.audit('buy', old_points, 'Points taken for being killed in a reserved module')\n player.deposit = 0\n # if the remaining points are not enough to stay in this plane, move them back to spectators\n if player.points < self._get_points(server, player):\n server.move_to_spectators(player)\n elif data['eventName'] == 'crash':\n player: CreditPlayer = cast(CreditPlayer, server.get_player(id=data['arg1']))\n if not player:\n return\n if 'use_reservations' in config and config['use_reservations']:\n if player.deposit > 0:\n old_points = player.points\n player.points -= player.deposit\n player.audit('buy', old_points, 'Points taken for crashing in a reserved module')\n player.deposit = 0\n else:\n old_points = player.points\n player.points -= self._get_costs(server, player)\n player.audit('buy', old_points, 'Points taken for crashing in a reserved module')\n if player.points < self._get_points(server, player):\n server.move_to_spectators(player)\n elif data['eventName'] == 'landing':\n # clear deposit on landing\n player: CreditPlayer = cast(CreditPlayer, server.get_player(id=data['arg1']))\n if player and player.deposit > 0:\n player.deposit = 0\n elif data['eventName'] == 'takeoff':\n # take deposit on takeoff\n if 'use_reservations' in config and config['use_reservations']:\n player: CreditPlayer = cast(CreditPlayer, server.get_player(id=data['arg1']))\n if player and player.deposit == 0 and int(player.sub_slot) == 0:\n player.deposit = self._get_costs(server, player)\n elif data['eventName'] == 'disconnect':\n player: CreditPlayer = cast(CreditPlayer, server.get_player(id=data['arg1']))\n if player and player.deposit > 0:\n old_points = player.points\n 
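# settle the outstanding module deposit before the player disconnects\n                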
player.points -= player.deposit\n player.audit('buy', old_points, 'Points taken for using a reserved module')\n player.deposit = 0\n elif data['eventName'] == 'mission_end':\n # give all players their credit back, if the mission ends, and they are still airborne\n for player in server.players.values():\n player.deposit = 0\n","repo_name":"Special-K-s-Flightsim-Bots/DCSServerBot","sub_path":"plugins/slotblocking/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":10347,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"94"} +{"seq_id":"22974601121","text":"import numpy as np\nfrom modules.calcModuleTP import link2index as l2i\n\ndef torSpring(kr, qi, i, j, theta0):\n thetai = qi[l2i(i, \"theta\")]\n thetaj = qi[l2i(j, \"theta\")]\n deltaTheta = thetai-thetaj\n Q_SpringThetai = -kr*(deltaTheta-theta0)\n Q_SpringThetaj = kr*(deltaTheta-theta0)\n\n return Q_SpringThetai, Q_SpringThetaj\n\ndef torDamp(cr, qiDot, i, j):\n omegai = qiDot[l2i(i, \"theta\")]\n omegaj = qiDot[l2i(j, \"theta\")]\n deltaOmega = omegai-omegaj\n Q_DampThetai = -cr*deltaOmega\n Q_DampThetaj = cr*deltaOmega\n\n return Q_DampThetai, Q_DampThetaj","repo_name":"eigeneddie/triple-pendulum-simulation-python","sub_path":"modules/forceModule.py","file_name":"forceModule.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"2307094473","text":"# 2019\n# author: yuxuan\n\nimport numpy as np\nimport pandas as pd\n\nimport statsmodels.api as sm\nfrom sklearn.model_selection import train_test_split\n\n\nclass LinearModelSignals(object):\n def __init__(\n self, olsres, enter_buy_threshold=None, enter_sell_threshold=None,\n exit_buy_threshold=None, exit_sell_threshold=None):\n self._olsres = olsres\n self._enter_buy = enter_buy_threshold\n self._enter_sell = enter_sell_threshold\n self._exit_buy = exit_buy_threshold\n self._exit_sell = exit_sell_threshold\n assert self._enter_sell <= self._exit_buy\n assert self._exit_sell <= self._enter_buy\n self._pred = np.nan\n\n def on_feature(self, feature):\n x = np.array([feature])\n if np.any(np.isnan(x)):\n return\n #x = sm.add_constant(feature)\n self._pred = self._olsres.predict(x)\n\n def should_enter_buy(self):\n return self._pred > self._enter_buy\n\n def should_exit_buy(self):\n return self._pred < self._exit_buy\n\n def should_enter_sell(self):\n return self._pred < self._enter_sell\n\n def should_exit_sell(self):\n return self._pred > self._exit_sell\n\n\nclass LinearModel(object):\n def __init__(self):\n self._result = None\n\n def train_model(self, x, y):\n X_train, X_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=42)\n X_train = sm.add_constant(X_train)\n model = sm.OLS(y_train, X_train)\n self._result = model.fit()\n print(self._result.summary())\n y_hat = self._result.predict(X_train)\n y_test_hat = self._result.predict(sm.add_constant(X_test))\n print('test y corr:', np.corrcoef(y_test, y_test_hat))\n y = pd.DataFrame({'y': y_train.tolist(), 'yhat': y_hat.tolist()})\n y_test = pd.DataFrame({'y': y_test.tolist(), 'yhat': y_test_hat.tolist()})\n y.to_csv('lm_y_train.csv', index=False)\n y_test.to_csv('lm_y_test.csv', index=False)\n\n def predict(self, x):\n xnew = sm.add_constant(x)\n if x.shape == xnew.shape:\n assert x.ndim == 2\n xnew = np.insert(x, 0, values=1, axis=1)\n return 
self._result.predict(xnew)\n","repo_name":"pyx1992/hftml","sub_path":"model/linear_model.py","file_name":"linear_model.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21030804562","text":"\"\"\"\nCOCOWriter and save_to functions.\n\"\"\"\n\nimport json\nimport logging\nimport os\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import List\nimport mmcv\n\nfrom .coco import _isArrayLike\nfrom .utils import roidb_to_cocoDt\n\n__all__ = [\"COCOWriter\", \"save_to_cocoDt\", \"save_to_cocoGt\"]\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass COCOWriter:\n\n \"\"\"Convert roidb to coco(gt format).\n\n Parameters\n ----------\n actions : bool\n add actions annotations\n videos : bool\n add videos annotations\n person: bool\n set categories with person only\n image_ext: str\n default image ext used in images['file_name'] when there is no ext\n\n Example\n -------\n >>> from piata.coco import COCOWriter\n >>> writer = COCOWriter(videos=True, person=True)\n >>> writer.add_roidb(roidb)\n >>> writer.save('annots.json')\n\n \"\"\"\n\n def __init__(\n self,\n *,\n actions=False,\n videos=False,\n person=False,\n dummy_object=False,\n image_ext=\".jpg\",\n image_height=None,\n image_width=None,\n description=\"Created by piata.\",\n version=\"v1\",\n ):\n if actions:\n self._actions = list()\n if videos:\n self._videos = list()\n self._image_ext = image_ext\n self._image_height = image_height\n self._image_width = image_width\n self._person = person\n self._dummy_object = dummy_object\n self._categories = list()\n self._info = dict()\n self._images = defaultdict(dict) # id: image\n self._annotations = defaultdict(dict) # id: anno\n # initialize info\n self._info[\"description\"] = description\n self._info[\"version\"] = version \n self._info[\"year\"] = int(datetime.today().isoformat()[:4])\n self._info[\"contributor\"] = os.environ.get(\"USER\", \"piata\")\n self._info[\"date_created\"] = (\n datetime.today().isoformat()[:10].replace(\"-\", \"/\")\n )\n # initialize categories\n if self._person:\n self._categories = [{\"id\": 1, \"name\": \"person\"}]\n if self._dummy_object:\n self._categories = [{\"id\": 1, \"name\": \"dummy_object\"}]\n self._anno_id = 1\n\n def _is_coco_image_name(self, image_name):\n name = os.path.splitext(image_name)[0]\n # if len(name) != 12:\n # return False, None\n try:\n _id = int(name)\n except Exception as e:\n return False, None\n return True, _id\n\n def _get_image_id(self, image_name, prefix=None, use_coco_name=False):\n \"\"\"\n Get image_id given image_name, starts from 1.\n For video-type data, please set prefix.\n\n \"\"\"\n if not hasattr(self, \"_image_name_to_id\"):\n self._image_name_to_id = dict()\n image_key = prefix + \"_\" + image_name if prefix else image_name\n if image_key not in self._image_name_to_id:\n is_coco_name, image_id = self._is_coco_image_name(image_name)\n if not is_coco_name or not use_coco_name:\n image_id = len(self._image_name_to_id) # image_id is 0-based\n self._image_name_to_id[image_key] = image_id\n return self._image_name_to_id[image_key]\n\n def _get_video_id(self, videoname):\n \"\"\"\n Get video_id given videoname, starts from 1.\n \"\"\"\n if not hasattr(self, \"_video_name_to_id\"):\n self._video_name_to_id = dict()\n if videoname not in self._video_name_to_id:\n video_id = len(self._video_name_to_id) + 1\n self._video_name_to_id[videoname] = video_id\n return 
self._video_name_to_id[videoname]\n\n def _set_video_in_image(self, image, videoname):\n \"\"\"\n Set video tag in image.\n\n \"video_id\": 1 # 属于哪个video\n \"frame_id\": 1, # 从video获取frame\n \"prev_image_id\": 300, # 前一帧的image_id\n \"next_image_id\": 302, # 后一帧的image_id\n\n \"\"\"\n video_id = self._get_video_id(videoname)\n image_name = image[\"file_name\"]\n try:\n if \"_\" in image[\"file_name\"]:\n frame_id = int(\n os.path.splitext(image[\"file_name\"])[0].split(\"_\")[-1]\n )\n else:\n frame_id = int(os.path.splitext(image[\"file_name\"])[0])\n except Exception as e:\n logging.error(\"invalid image_name when using videos\")\n raise e\n prev_image_id = self._get_image_id(image_name, prefix=videoname) - 1\n next_image_id = self._get_image_id(image_name, prefix=videoname) + 1\n image.update(\n {\n \"video_id\": video_id,\n \"frame_id\": frame_id,\n \"prev_image_id\": prev_image_id,\n \"next_image_id\": next_image_id,\n }\n )\n\n def add_roidb(\n self,\n roidb: List,\n videoname: str = None,\n roi_wise: bool = False,\n use_coco_name: str = False,\n ):\n \"\"\"\n Add roidb in coco.\n\n Args:\n roidb: roi list\n videoname: str\n videoname, only used when using videos\n roi_wise: str\n if input roidb is roi_wise or not.\n use_coco_name: str\n if True, try to use image_name as image_id.\n if Fales, force to use index as image_id.\n \"\"\"\n logger.info(\"adding roidb ...\")\n assert isinstance(roidb, list)\n # avoid loop-import\n from pdebug.piata import Input\n\n if not roi_wise:\n roidb_roiwise = Input.imgwise_to_roiwise(roidb)\n else:\n roidb_roiwise = roidb\n\n for roi in roidb_roiwise:\n image_name = roi[\"image_name\"]\n if os.path.splitext(image_name)[1] == \"\":\n image_name += self._image_ext\n # update self._images\n image_id = self._get_image_id(image_name, prefix=videoname, use_coco_name=use_coco_name)\n if image_id not in self._images:\n image = {\n \"id\": int(image_id),\n \"file_name\": image_name,\n }\n if self._image_height:\n image.update({\"height\": self._image_height})\n if self._image_width:\n image.update({\"width\": self._image_width})\n if \"image_height\" in roi:\n image.update({\"height\": roi[\"image_height\"]})\n if \"image_width\" in roi:\n image.update({\"width\": roi[\"image_width\"]})\n if hasattr(self, \"_videos\"):\n self._set_video_in_image(image, videoname)\n self._images[image_id] = image\n # update self._annotations\n anno = {\n \"id\": int(self._anno_id),\n \"image_id\": int(image_id),\n }\n for key in roi:\n if key == \"image_name\":\n continue\n if key == \"image_id\":\n continue\n if key == \"image_height\":\n continue\n if key == \"image_width\":\n continue\n if key == \"unknown\":\n continue\n if key == \"boxes\":\n assert roi[key].ndim == 1\n x1, y1, x2, y2 = roi[key][:4]\n bbox = [x1, y1, x2 - x1, y2 - y1]\n bbox = [float(x) for x in bbox]\n anno.update({\"bbox\": bbox})\n continue\n if key in (\"segmentation\", \"masks\", \"gt_masks\"):\n anno.update({\"segmentation\": roi[key]})\n \n if mmcv.is_list_of(anno[\"segmentation\"], int):\n anno.update({\"segmentation\": [roi[key]]})\n\n continue\n # convert np.ndarray to list of float\n\n if key in [\"category_id\", \"iscrowd\"]:\n # keep raw dtype\n value = roi[key]\n if isinstance(value, (list, tuple)):\n assert len(value) == 1\n value = value[0]\n else:\n if _isArrayLike(roi[key]):\n value = [float(x) for x in roi[key]]\n else:\n try:\n value = float(roi[key])\n except ValueError as e:\n value = roi[key]\n anno.update({key: value})\n\n # force id and image_id to int\n anno[\"id\"] = 
int(anno[\"id\"])\n anno[\"image_id\"] = int(anno[\"image_id\"])\n\n if \"category_id\" not in anno:\n if self._person:\n anno.update({\"category_id\": 1})\n elif self._dummy_object:\n anno.update({\"category_id\": 1})\n else:\n # get category info from classes or gt_classes\n if \"classes\" in anno or \"gt_classes\" in anno:\n category_name = (\n anno[\"classes\"]\n if \"classes\" in anno\n else anno[\"gt_classes\"]\n )\n category_id = [\n cat[\"id\"]\n for cat in self._categories\n if cat[\"name\"] == category_name\n ]\n if not category_id:\n cur_ids = [cat[\"id\"] for cat in self._categories]\n category_id = max(cur_ids) + 1 if cur_ids else 1\n self._categories.append(\n {\"id\": category_id, \"name\": category_name}\n )\n else:\n category_id = category_id[0]\n anno.update({\"category_id\": category_id})\n else:\n pass\n if \"iscrowd\" not in anno:\n anno[\"iscrowd\"] = 0\n if \"area\" not in anno:\n assert \"bbox\" in anno\n anno[\"area\"] = anno[\"bbox\"][2] * anno[\"bbox\"][3]\n # if 'keypoints' not in anno:\n # anno['keypoints'] = [0.0 for _ in range(3 * 17)]\n # if 'num_keypoints' not in anno:\n # anno['num_keypoints'] = 0 # len(anno['keypoints']) // 3\n self._annotations[self._anno_id] = anno\n self._anno_id += 1\n\n def set_info(self, **kwargs):\n \"\"\"\n Add info in coco.\n \"\"\"\n self._info.update(kwargs)\n\n def set_categories(self, cats=None, auto_fill=False):\n \"\"\"\n Set categories.\n \"\"\"\n if isinstance(cats, dict):\n self._categories = [cats]\n elif isinstance(cats, list):\n self._categories = cats\n else:\n raise ValueError\n\n if auto_fill:\n categories = []\n for idx, cat in enumerate(self._categories):\n assert isinstance(cat, str)\n categories.append({\"id\": idx+1, \"name\": cat, \"supercategory\": \"\"})\n self._categories = categories\n\n def set_videos(self):\n \"\"\"\n Set videos based on self._video_name_to_id.\n\n \"\"\"\n if hasattr(self, \"_videos\") and hasattr(self, \"_video_name_to_id\"):\n self._videos = [\n {\"id\": self._video_name_to_id[k], \"name\": k}\n for k in self._video_name_to_id\n ]\n else:\n logging.warning(\"videos not found\")\n # revise begin and end of a video.\n videoToImgs = defaultdict(list)\n for image in self._images.values():\n videoToImgs[image[\"video_id\"]].append(image[\"id\"])\n for video_id in videoToImgs:\n imageIds = videoToImgs[video_id]\n prev_imageIds = [\n self._images[x][\"prev_image_id\"] for x in imageIds\n ]\n next_imageIds = [\n self._images[x][\"next_image_id\"] for x in imageIds\n ]\n for image_id in imageIds:\n if self._images[image_id][\"prev_image_id\"] not in imageIds:\n self._images[image_id][\"prev_image_id\"] = -1\n if self._images[image_id][\"next_image_id\"] not in imageIds:\n self._images[image_id][\"next_image_id\"] = -1\n\n def save(self, savename):\n \"\"\"\n Save to json file.\n \"\"\"\n if hasattr(self, \"_videos\") and len(self._videos) == 0:\n self.set_videos()\n if len(self._categories) == 0:\n logger.warning(\n \"categories is empty, you can add classes or \"\n \"gt_classes in roidb to generate correct categories.\"\n )\n self.set_categories({\"id\": 1, \"name\": \"default\"})\n data = {\n \"images\": [self._images[x] for x in self._images],\n \"annotations\": [self._annotations[x] for x in self._annotations],\n \"categories\": self._categories,\n \"info\": self._info,\n }\n if hasattr(self, \"_actions\"):\n data.update({\"actions\": self._actions})\n if hasattr(self, \"_videos\"):\n data.update({\"videos\": self._videos})\n logging.info(f\"saving to {savename} ...\")\n\n with 
open(savename, \"w\", encoding='utf8') as fid:\n json.dump(data, fid, indent=2, ensure_ascii=False)\n\n\ndef save_to_cocoDt(savefile, roidb, **kwargs):\n \"\"\"Save roidb to coco_dt format.\n\n Parameters\n ----------\n savefile : str\n filename\n roidb : list\n roidb list\n\n Returns\n -------\n None\n\n Example\n -------\n >>> from piata.coco import save_to_cocoDt\n >>> save_to_cocoDt('output.json', roidb)\n\n \"\"\"\n assert isinstance(savefile, str)\n assert isinstance(roidb, list)\n roidb_to_cocoDt(roidb, savefile, **kwargs)\n logging.info(\"saved to %s\" % savefile)\n\n\ndef save_to_cocoGt(savefile, roidb, **kwargs):\n \"\"\"Save roidb to coco_gt format.\n\n Parameters\n ----------\n savefile : str\n filename\n roidb : list\n roidb list\n\n Returns\n -------\n None\n\n Example\n -------\n >>> from piata.coco import save_to_cocoGt\n >>> save_to_cocoGt('output.json', roidb)\n\n \"\"\"\n assert isinstance(savefile, str)\n assert isinstance(roidb, list)\n writer = COCOWriter()\n writer.add_roidb(roidb)\n writer.save(savefile)\n","repo_name":"DuinoDu/pdebug","sub_path":"pdebug/piata/coco/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":14642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71025367991","text":"from termcolor import colored\nimport dungeon\nimport os\n\nwelcome_file = 'welcome.txt'\ncredits_file = 'credits.txt'\nhelp_file = 'help.txt'\n\n\ndef start():\n \"\"\"Start Game and show Menu\"\"\"\n os.system('clear')\n welcome()\n display_menu()\n while True:\n command = input('Enter command :')\n if command == '1':\n save_count = 0\n break\n elif command == '2':\n credits()\n elif command == '3':\n help()\n elif command == '4':\n exit()\n elif command == '5':\n save_count = load()\n os.system('clear')\n break\n else:\n print('Wrong command')\n return save_count\n\n\n\ndef welcome():\n \"\"\"Print Welcome\"\"\"\n with open(welcome_file, 'r') as content:\n text = content.read()\n print(colored(text, 'red'))\n\n\ndef credits():\n \"\"\"Show credits\"\"\"\n os.system('clear')\n with open(credits_file, 'r') as content:\n\t text = content.read()\n\t print(colored(text, 'green'))\n\t input(\"Enter - back to menu\")\n\ndef helpp():\n\tos.system('clear')\n\twith open(help_file, 'r') as content:\n\t\ttext = content.read()\n\t\tprint(colored(text, 'green'))\n\t\tinput(\"Enter - back to menu or game\")\n\n\ndef win_game():\n \"\"\"Show screen when you win\"\"\"\n with open('win_game.txt', 'r') as content:\n text = content.read()\n print(colored(text, 'yellow'))\n\n\ndef game_over():\n \"\"\"Show screen when you loose\"\"\"\n with open('game_over.txt', 'r') as content:\n text = content.read()\n print(colored(text, 'red'))\n print()\n\n\ndef display_menu():\n \"\"\"display menu\"\"\"\n print('Start Game press 1')\n print('Credits press 2')\n print('Help press 3')\n print('Quit game press 4')\n print('Load saved game: 5')\n","repo_name":"Castarox/dgn_repo","sub_path":"start_game.py","file_name":"start_game.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29265631489","text":"#!/usr/bin/env python\nimport rospy\nfrom gantry_control.msg import *\nfrom std_msgs.msg import Bool\nfrom std_srvs.srv import Trigger, TriggerResponse\nimport math\n\nimport valves_gcode_control\n\n#Get Parameters\nDEBUG=rospy.get_param('GLOBAL_DEBUG') or 
rospy.get_param('/gantry/DEBUG')\nparams=rospy.get_param('gantry')\nSAVE_DATA=rospy.get_param('GLOBAL_SAVE_DATA')\n#hand_type = params.get_param('hand_actuation');\n\nready = True\nposReached = 0\nhomed = False\ncurr_pos=[]\nfileNames=[]\nfactor=1.0\n\n\ndef callbackReady(data):\n\tglobal ready\n\t\n\tif DEBUG:\n\t\tprint(\"---coordinateMoves: Update READY State from: %d\"%(ready), end='')\n\tready=data.data\n\tif DEBUG:\n\t\tprint(\"\\tto: %d\"%(ready))\n\t\ndef callbackNewRun(data):\n\tif DEBUG:\n\t\tprint(fileNames)\n\tfileNames.append(data.filename)\n\tif DEBUG:\n\t\tprint(fileNames)\n\t\ndef updatePos(data):\n\tglobal posReached\n\tglobal curr_pos\n\t\n\tcurr_pos = data.position\n\tposReached = 1\n\n\tif DEBUG:\n\t\tprint('---coordinateMoves: Update Position: ', end='')\n\t\tprint('%f\\t%f\\t%f'%(curr_pos[0],curr_pos[1],curr_pos[2]))\n\t\tprint('Position Reached? %d'%(posReached))\n\ndef callbackHomed(data):\n\tglobal homed\n\thomed=data.data\n\tif DEBUG:\n\t\tprint('---coordinateMoves: Update Homing Status: %d'%(homed))\n\n\n\ndef queryPosition():\n\trospy.wait_for_service('/gantry/get_pos')\n\tquery=rospy.ServiceProxy('/gantry/get_pos', Trigger)\n\ttry:\n\t\tresp1 = query()\n\texcept rospy.ServiceException as exc:\n\t\tif DEBUG:\n\t\t\tprint(\"Service did not process request: \" + str(exc))\n\n\n\ndef server():\n\tglobal posReached\n\tglobal curr_pos\n\tglobal ready\n\tglobal fileNames\n\n\tpubPos = rospy.Publisher(\"/gantry/set_position\", trajectory, queue_size=10)\n\tpubAct =rospy.Publisher(\"gantry/set_actuation\", actuation, queue_size=10)\n\tpubRun =rospy.Publisher(\"gantry/runFile_ack\", Bool, queue_size=10)\n\tpubComplete =rospy.Publisher(\"/gantry/run_complete\", Bool, queue_size=10)\n\t \n\t#also actuation will be a service\n\trospy.Subscriber(\"/gantry/ready\", Bool, callbackReady)\n\trospy.Subscriber(\"/gantry/homed\",Bool,callbackHomed)\n\trospy.Subscriber(\"/gantry/runFile\", runFile, callbackNewRun)\n\trospy.Subscriber(\"/gantry/curr_position\", currPos, updatePos)\n\n\trospy.init_node('motion_server', anonymous=True)\n\n\trate = rospy.Rate(100)\n\touterRate=rospy.Rate(5)\n\twaitRate=rospy.Rate(5)\n\twhile not rospy.is_shutdown():\n\t\tif homed and fileNames:\n\t\t\t#Read the next line in the file\n\t\t\tprint('Starting...')\n\t\t\tprint(fileNames[0])\n\t\t\trospy.loginfo(True)\n\t\t\tpubRun.publish(True)\n\t\t\tf = open(fileNames[0],'r')\n\n\t\t\t# Interpret trajectory line by line\n\t\t\tfor line in f:\n\t\t\t\tif not homed:\n\t\t\t\t\tbreak\n\n\t\t\t\tl = line.strip() # Strip all EOL characters for streaming\n\t\t\t\t\n\t\t\t\t#Get parts of trajectory\n\t\t\t\tnew_pos = l.split('\\t')\n\t\t\t\tif not len(new_pos)>1:\n\t\t\t\t\tbreak\n\t\t\t\tnew_pos=list(map(float,new_pos)) #Convert everything to a float\n\t\t\t\tcurr_timestep=new_pos[0]\n\t\t\t\t\n\t\t\t\t#Get Commanded Positions\n\t\t\t\tposition = new_pos[1:4]\n\t\t\t\tspeed = new_pos[4]\n\t\t\t\t\n\t\t\t\t#Get the speed\n\t\t\t\tif speed==0.0:\n\t\t\t\t\tspeed=math.sqrt(sum([(a-b)**2 for a,b in zip(curr_pos,position)]))/curr_timestep*60*factor\n\n\t\t\t\tif speed==0.0:\n\t\t\t\t\tspeed=500\n\n\t\t\t\t#Send actuation if it's different than before\n\t\t\t\t#Wait for the ready signal after each send\n\t\t\t\t#ASSUME ACTUATION SIGNALS ARE PSI\n\t\t\t\tact=actuation()\n\t\t\t\tact.levels=new_pos[5:]\n\n\t\t\t\trospy.loginfo(act)\n\t\t\t\tpubAct.publish(act)\n\t\t\t\t\n\t\t\t\t#Send position if it's different than before\n\t\t\t\t#Wait for the ready signal after each
send\n\t\t\t\tpos=trajectory()\n\t\t\t\tpos.position=position\n\t\t\t\tpos.speed=speed\n\t\t\t\t\n\t\t\t\tposReached = False\n\t\t\t\t\n\t\t\t\trospy.loginfo(pos)\n\t\t\t\tpubPos.publish(pos)\n\t\t\t\t#Sleep for the specified length\n\t\t\t\t#print('Sleep for %f sec'%(curr_timestep))\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif curr_timestep==0:\n\t\t\t\t\tcurr_timestep=0.001\n\t\t\t\t\n\t\t\t\t\n\t\t\t\trate=rospy.Rate(1/curr_timestep)\n\t\t\t\trate.sleep()\n\t\t\t\t\n\t\t\t\tif not (ready and posReached):\n\t\t\t\t\twhile (not rospy.is_shutdown()) and homed:\n\t\t\t\t\t\tif DEBUG:\n\t\t\t\t\t\t\tprint(\"READY: %d\\tPOS REACHED: %d\"%(ready,posReached))\n\t\t\t\t\t\tif (ready and posReached):\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tqueryPosition()\n\t\t\t\t\t\twaitRate.sleep()\n\t\t\t\t\n\t\t\t# Close file and serial port\n\t\t\tf.close()\n\t\t\tfileNames.pop(0) #Remove the first filename in the list\n\t\t\trospy.loginfo('Finished the run!')\n\t\t\tpubComplete.publish(True)\n\n\t\telif fileNames and not homed:\n\t\t\tfileNames=[]\n\t\t\trospy.logerr('---coordinateMoves: PLEASE HOME STAGE')\n\t\t\tif DEBUG:\n\t\t\t\tprint(fileNames)\n\n\t\touterRate.sleep()\t\n\n\n\nif __name__ == '__main__':\n\ttry:\n\t\tserver()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n\n\n\n","repo_name":"cbteeple/watertank-gantry","sub_path":"ROS/catkin_ws/src/gantry_control/scripts/coordinateMoves.py","file_name":"coordinateMoves.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35048095917","text":"class Solution:\n def reverseString(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n leftPtr, rightPtr = 0, len(s)-1 #two pointers at two ends of the string\n while leftPtr < rightPtr:\n s[leftPtr], s[rightPtr] = s[rightPtr], s[leftPtr] # swap the values\n leftPtr = leftPtr + 1 # increment left pointer\n rightPtr = rightPtr - 1 # decrement right pointer\n","repo_name":"fahimtakin/LeetCode","sub_path":"Python/344-ReverseString.py","file_name":"344-ReverseString.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"6802670820","text":"# This scripts takes the data that is in separate CSV files,\n# concatenates them and splits into train, validation and test sets.\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport glob\nimport os\n\ndef load_data():\n # Load in data\n all_files = glob.glob(\"../data/parl_speeches_1980-1989/*.csv\")\n df = pd.concat((pd.read_csv(f) for f in all_files))\n #mask = df['year'].isin(list(range(2010,2020)))\n #df = df[mask]\n return df\n\ndef inspect_data(data, name):\n # Let's take a look at the data and how the data are distributed\n print(f'First rows of {name}:')\n print(data.head(10))\n print(f'Label distribution in {name}:')\n print(data['label'].value_counts(normalize=True))\n print(f'Year distribution in {name}:')\n print(data['year'].value_counts(normalize=True))\n print(f'Shape of {name}:')\n print(data.shape)\n\ndef replace_id(data):\n data.drop(columns = ['id'], inplace=True)\n ids = []\n for i in range(len(data)):\n ids.append(i)\n data.insert(0, 'id', ids, True)\n return data\n\ndef filter(data, num_parties = 3):\n # Labels 0, 1 and 2 (SD, KOK, KESK) are vastly overrepresented\n\n if num_parties == 3:\n # Let's just keep those three parties to get an even distribution\n print(\"Dropping smaller 
parties...\")\n bool_mask = data['label'] < 3\n data = data[bool_mask]\n # Let's shuffle the data so it is no longer in chronological order\n data_sample = data.sample(frac=1, random_state = 1234).reset_index(drop=True)\n # Let's print dataset head to see how it looks\n print('Data after shuffling:')\n print(data_sample.head(20))\n return data_sample\n\ndef create_splits(data):\n # Split original DataFrame into training and testing sets\n train, test = train_test_split(data, test_size=0.2, random_state=0)\n\n # Split the test data further in to test and validation sets\n test, validation = train_test_split(test, test_size=0.5, random_state=0)\n \n return train, validation, test\n\ndef save_data(datasets, names, num):\n if num == 3:\n num = 'three'\n elif num == 8:\n num = 'eight'\n print('Saving data...')\n for data, name in zip(datasets, names):\n data.to_csv(f'../data/datasets_by_decade/parl_speeches_80s_{num}parties_{name}.csv', index = False)\n\n\ndef main():\n num_parties = 8 #choose either 3 or 8 depending on how many parties you want in the final data\n data = load_data()\n inspect_data(data, 'data')\n data = replace_id(data)\n data = filter(data, num_parties)\n train, validation, test = create_splits(data)\n save_data([train, validation, test], ['train', 'validation', 'test'], num_parties)\n\nif __name__ == '__main__':\n main()\n","repo_name":"TurkuNLP/parliament-speech-project","sub_path":"create_datasets.py","file_name":"create_datasets.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"3418793518","text":"import os\r\nimport shutil\r\n\r\n\r\ndef Extensao(file_name):\r\n splited = file_name.split(\".\")\r\n return splited[-1]\r\n# declara a função extensão com a variavel do nome do arquivo para poder\r\n# vizualizar\r\n\r\n\r\ndef Validate_path(path):\r\n if os.path.isdir(path):\r\n return True\r\n else:\r\n os.mkdir(path)\r\n# Cria pasta caso não tenha, o True é para vizualizar no debug\r\n\r\n\r\nraiz = os.path.expanduser('~\\\\')\r\nimg_path = os.path.join(raiz, \"Pictures\")\r\n# Acha a pasta raiz e define a pasta padrão como Imagens/Pictures\r\nExt_list = {\r\n \"png\": os.path.join(img_path, \"png\"),\r\n \"pdf\": os.path.join(img_path, \"pdf\"),\r\n \"jpeg\": os.path.join(img_path, \"jpeg\"),\r\n \"gif\": os.path.join(img_path, \"gif\"),\r\n \"mp4\": os.path.join(img_path, \"mp4\"),\r\n \"exe\": os.path.join(img_path, \"exe\"),\r\n \"zip\": os.path.join(img_path, \"zip\"),\r\n \"jpeg\": os.path.join(img_path, \"jpeg\"),\r\n \"jpg\": os.path.join(img_path, \"jpg\"),\r\n \"outros\": os.path.join(img_path, \"outros\")}\r\n\r\nfor Ext_name in Ext_list:\r\n Validate_path(Ext_list[Ext_name])\r\n # Convoca o nome da extensão dentro do dicionario de extensões\r\n\r\nimg_path_list_dir = os.listdir(img_path)\r\nfor file in img_path_list_dir:\r\n file_path = os.path.join(img_path, file)\r\n if os.path.isfile(file_path):\r\n\r\n # print(file_path,Ext)\r\n Ext = Extensao(file)\r\n if Ext in Ext_list:\r\n shutil.copy(file_path, Ext_list[Ext])\r\n else:\r\n shutil.copy(file_path, Ext_list[\"outros\"])\r\n\r\n# print(img_path_list_dir)\r\n","repo_name":"flagelus/Pasta-Organizada-em-Python","sub_path":"pastaorganizada.py","file_name":"pastaorganizada.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20238279400","text":"'''\nReal Data\n'''\ndata = 
open('day8-input.txt').readlines()\n\n'''\nExample Data\n'''\n# data = [\n# 'b inc 5 if a > 1',\n# 'a inc 1 if b < 5',\n# 'c dec -10 if a >= 1',\n# 'c inc -20 if c == 10'\n# ]\n\n'''\nConstants\n'''\nPRINT_VERBOSE = False\nFUNCS = {\n '>': lambda a,b: a > b,\n '>=': lambda a,b: a >= b,\n '<': lambda a,b: a < b,\n '<=': lambda a,b: a <= b,\n '==': lambda a,b: a == b,\n '!=': lambda a,b: a != b,\n 'inc': lambda a,b: a + b,\n 'dec': lambda a,b: a - b\n}\n\ndef print_debug(data):\n if not PRINT_VERBOSE: return\n print('{}'.format(data))\n\n'''\nPart 1 - Parse Registers and implement action\n'''\nregisters = {}\npart2 = 0\n\nfor line in data:\n print_debug('line: {}'.format(line))\n reg, op, val, _, con_reg, con_op, con_val = line.split()\n if FUNCS[con_op](registers.get(con_reg, 0), int(con_val)):\n registers[reg] = FUNCS[op](registers.get(reg, 0), int(val))\n part2 = max(part2, registers.get(reg, 0))\n\nprint('Part 1:', max(registers.values()))\n\n'''\nPart 2 - Track what the largest value was throughout the operations\n'''\nprint('Part 2:', part2)\n","repo_name":"Rudedog9d/advent-of-code","sub_path":"2017/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22455727016","text":"import pygame\nimport pytmx\nfrom settings import *\n\nclass TiledMap:\n def __init__(self, filename):\n tm = pytmx.load_pygame(filename, pixelalpha=True)\n self.width = tm.width * tm.tilewidth\n self.height = tm.height * tm.tileheight\n self.tmxdata = tm\n\n def render(self, surface):\n ti = self.tmxdata.get_tile_image_by_gid\n for layer in self.tmxdata.visible_layers:\n if isinstance(layer, pytmx.TiledTileLayer):\n for x, y, gid, in layer:\n tile = ti(gid)\n if tile:\n surface.blit(tile, (x * self.tmxdata.tilewidth,\n y * self.tmxdata.tileheight))\n\n def make_map(self):\n temp_surface = pygame.Surface((self.width, self.height))\n self.render(temp_surface)\n return temp_surface\n\nclass Map:\n def __init__(self, filename):\n self.data = []\n with open(filename, 'r') as f:\n for line in f:\n self.data.append(line) \n self.mapwidth = len(self.data[0])\n self.mapheight = len(self.data)\n self.width = self.mapwidth * TILESIZE\n self.height = self.mapheight * TILESIZE\n\nclass Camera:\n def __init__(self, w, h):\n self.camera = pygame.Rect(0, 0, w, h)\n self.width = w\n self.height = h \n\n def apply(self, e):\n return e.rect.move(self.camera.topleft)\n\n def apply_rect(self, rect):\n return rect.move(self.camera.topleft)\n\n def update(self, target):\n x = -target.rect.x + int(SCREEN_WIDTH / 2)\n y = -target.rect.y + int(SCREEN_HEIGHT / 2)\n x, y = min(0, x), min(0, y)\n x = max(-(self.width - (SCREEN_WIDTH + TILESIZE)), x) \n y = max(-(self.height - SCREEN_HEIGHT), y)\n self.camera = pygame.Rect(x, y, self.width, self.height)\n","repo_name":"randrawll/ThisIsEndGamePy","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"44733040106","text":"import matplotlib.pyplot as plt\nimport sys\nfrom math import sqrt, exp\n\n_, filename = sys.argv[:2]\nPI = 3.1416\na = 1e-2\n\nmax_r = 400\n\nfp = open(filename, 'r')\nlines = [u.split() for u in fp.readlines()[2:]]\nfp.close()\n\nd = []\n\nn_temp = []\n\nfor line in lines:\n if 'END' in line:\n d.append(n_temp)\n n_temp = []\n\n else:\n p = [float(u)*1e10 for u in line]\n n_temp.append(p)\n\n\ndef delta(x):\n return 
exp(-((x/a)**2))/(a*sqrt(PI))\n\n\nn_atoms = len(d[0])\n\n# Van Hove\n\n\ndef G(r, t):\n G_r = 0\n for i in range(n_atoms):\n for j in range(n_atoms):\n rj = d[t][j]\n ri = d[0][i]\n\n vec = [rj[0] - ri[0],\n rj[1] - ri[1],\n rj[2] - ri[2]]\n x = sqrt(sum(u**2 for u in vec))\n G_r += delta(x-r)\n\n G_r /= (n_atoms**3)\n return G_r\n\n\ndef G_r_func(t):\n G_r_vs_R = [0 for _ in range(max_r)]\n for r in range(max_r):\n G_val = G(r/10, t)\n G_r_vs_R[r] = G_val\n return G_r_vs_R\n\n\nx_axis = [u/10 for u in range(max_r)]\n\ny_axes = []\ncolors = ['red', 'orange', 'green', 'lightseagreen', 'dodgerblue',\n 'navy', 'slateblue', 'violet', 'magenta', 'deeppink', 'red']\ni = 0\nfor t in range(0, len(d), int(len(d)/11)):\n print(t)\n if i >= len(colors):\n break\n if t != 0:\n y = G_r_func(t)\n plt.plot(x_axis, y, colors[i])\n i += 1\n\nplt.gca().legend([str(u) for u in range(22, len(d), int(len(d)/11))])\nplt.show()\n","repo_name":"VijayrajS/PhySCM_project","sub_path":"Vanhove.py","file_name":"Vanhove.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35291158940","text":"#Author: Alexander Schwarz\n#Version 1.0\n\n#################################################################################\n# Main file for the gesture recognizer(Version with the crazyflie gesture models)\n#\n# In here the Bluetooth Low Energy (BLE) Communication to the Tactigon will be\n# initialized. The callback of the streaming Characteristic is implemented in\n# the myStreamCB() function and passed to the Tactigon-Wrapper.\n# Also the predictor process and the application process will be initialized\n# and started in here. The predictor and the application processes communicate\n# over queues which are created here.\n# The trained models for the gesture recognizer must be loaded here and then\n# be passed to the predictor process.\n#\n\n\n#################################################################################\n# needed imports\n#\n\nimport struct\nimport binascii\nimport pandas as pd\nimport numpy as np\nimport math\nimport itertools\nfrom sklearn.neural_network import MLPClassifier\nfrom predictor import Data3D\nfrom predictor import Predictor\n#from FakePredictor import Predictor\nfrom multiprocessing import JoinableQueue\nfrom application import Application\n\nfrom joblib import load\n\nfrom TwoWayComm import BTCommunication, LED_COLOR, LED_MODE, DIMENSION, USAGE, MODE, GNORM\nimport time\n\nTACTI_ACC_COMPRESSION = 819\nTACTI_GYRO_COMPRESSION = 32\n\n\n#################################################################################\n# load trained models\n#\n\n#early predictors\nstart_clf_20 = load('./models/crazyFlyGestures/start_nn_20.joblib')\nstartR_clf_20 = load('./models/crazyFlyGestures/startR_nn_20.joblib')\nland_clf_20 = load('./models/crazyFlyGestures/land_nn_20.joblib')\nlandR_clf_20 = load('./models/crazyFlyGestures/landR_nn_20.joblib')\nwp_back_clf_20 = load('./models/crazyFlyGestures/wp_back_nn_20.joblib')\nwp_backR_clf_20 = load('./models/crazyFlyGestures/wp_backR_nn_20.joblib')\nwp_del_clf_20 = load('./models/crazyFlyGestures/wp_del_nn_20.joblib')\nwp_next_clf_20 = load('./models/crazyFlyGestures/wp_next_nn_20.joblib')\nwp_nextR_clf_20 = load('./models/crazyFlyGestures/wp_nextR_nn_20.joblib')\nwp_set_clf_20 = load('./models/crazyFlyGestures/wp_set_nn_20.joblib')\nstillGesture_clf_20 = load('./models/crazyFlyGestures/stillGesture_nn_20.joblib')\n\n#moveDown_clf_20 = 
load('./models/crazyFlyGestures/moveDown_nn_20.joblib')\n#moveUp_clf_20 = load('./models/crazyFlyGestures/moveUp_nn_20.joblib')\n#pullUp_clf_20 = load('./models/crazyFlyGestures/pullUp_nn_20.joblib')\n#swingAndPointForward_clf_20 = load('./models/crazyFlyGestures/swingAndPointForward_nn_20.joblib')\n#rotateLeft_clf_20 = load('./models/crazyFlyGestures/rotateLeft_nn_20.joblib')\n#rotateRight_clf_20 = load('./models/crazyFlyGestures/rotateRight_nn_20.joblib')\n#stillGesture_clf_20 = load('./models/crazyFlyGestures/stillGesture_nn_20.joblib')\n\n#validation predictores\nstart_clf_40 = load('./models/crazyFlyGestures/start_nn_40.joblib')\nstartR_clf_40 = load('./models/crazyFlyGestures/startR_nn_40.joblib')\nland_clf_40 = load('./models/crazyFlyGestures/land_nn_40.joblib')\nlandR_clf_40 = load('./models/crazyFlyGestures/landR_nn_40.joblib')\nwp_back_clf_40 = load('./models/crazyFlyGestures/wp_back_nn_40.joblib')\nwp_backR_clf_40 = load('./models/crazyFlyGestures/wp_backR_nn_40.joblib')\nwp_del_clf_40 = load('./models/crazyFlyGestures/wp_del_nn_40.joblib')\nwp_next_clf_40 = load('./models/crazyFlyGestures/wp_next_nn_40.joblib')\nwp_nextR_clf_40 = load('./models/crazyFlyGestures/wp_nextR_nn_40.joblib')\nwp_set_clf_40 = load('./models/crazyFlyGestures/wp_set_nn_40.joblib')\nstillGesture_clf_40 = load('./models/crazyFlyGestures/stillGesture_nn_40.joblib')\n\n#moveDown_clf_40 = load('./models/crazyFlyGestures/moveDown_nn_40.joblib')\n#moveUp_clf_40 = load('./models/crazyFlyGestures/moveUp_nn_40.joblib')\n#pullUp_clf_40 = load('./models/crazyFlyGestures/pullUp_nn_40.joblib')\n#swingAndPointForward_clf_40 = load('./models/crazyFlyGestures/swingAndPointForward_nn_40.joblib')\n#rotateLeft_clf_40 = load('./models/crazyFlyGestures/rotateLeft_nn_40.joblib')\n#rotateRight_clf_40 = load('./models/crazyFlyGestures/rotateRight_nn_40.joblib')\n#stillGesture_clf_40 = load('./models/crazyFlyGestures/stillGesture_nn_40.joblib')\n\n#list of predictors\nearlyPredictors = [start_clf_20, startR_clf_20, land_clf_20, landR_clf_20, wp_back_clf_20 , wp_backR_clf_20, wp_del_clf_20, wp_next_clf_20, wp_nextR_clf_20, wp_set_clf_20,stillGesture_clf_20]\nvalidationPredictors = [start_clf_40, startR_clf_40, land_clf_40, landR_clf_40, wp_back_clf_40 , wp_backR_clf_40, wp_del_clf_40, wp_next_clf_40, wp_nextR_clf_40, wp_set_clf_40,stillGesture_clf_40]\n\n#earlyPredictors = [moveDown_clf_20, moveUp_clf_20, pullUp_clf_20, swingAndPointForward_clf_20, rotateLeft_clf_20, rotateRight_clf_20, stillGesture_clf_20]\n#validationPredictors = [moveDown_clf_40, moveUp_clf_40, pullUp_clf_40, swingAndPointForward_clf_40, rotateLeft_clf_40, rotateRight_clf_40, stillGesture_clf_40]\n\n#class_names of the gestures\n#these must be in the same order as the predictors\n#class_names = ['movPosX', 'movPosY', 'movPosZ', 'movNegX', 'movNegY', 'movNegZ', 'stillGesture']\n\nclass_names = ['start', 'startR', 'land', 'landR', 'wp_back', 'wp_backR','wp_del','wp_next','wp_nextR', 'wp_set','stillGesture']\n\n\n#################################################################################\n# create communication queues\n#\n\nwrapper_to_predictor_queue = JoinableQueue()\npredictor_to_application_queue = JoinableQueue()\ndirect_control_queue = JoinableQueue()\n\nmode_switch_flag = False\n\n\n#################################################################################\n# BLE data streaming callback\n#\n# Handles a dataframe every 20ms\n# There are different dataframes that can approach\n#\n# Application Mode 3D (mode == 1)\n#\n# 
-----------------------------------------------------------------------------------------------------\n# | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 |\n# -----------------------------------------------------------------------------------------------------\n# | timestamp | accX | accY | accZ | gyroX | gyroY | gyroZ | b1 | b2 | b3 | b4 | unused | mode |\n# -----------------------------------------------------------------------------------------------------\n#\n# Application Mode 2D (mode == 2)\n#\n# -----------------------------------------------------------------------------------------------------\n# | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 |\n# -----------------------------------------------------------------------------------------------------\n# | timestamp | accX | accY | gyroZ | unused | b1 | b2 | b3 | b4 | unused | mode |\n# -----------------------------------------------------------------------------------------------------\n#\n# Direct Control Mode (mode == 3)\n#\n# ---------------------------------------------------------------------------------------------------------\n# | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 |\n# ---------------------------------------------------------------------------------------------------------\n# | event | roll | pitch | yaw | unused | b1 | b2 | b3 | b4 | unused | mode |\n# ---------------------------------------------------------------------------------------------------------\n#\n\ndef myStreamCB(cHandle, data):\n \"\"\" callback for new fresh data\n on streaming characteristic\n \"\"\"\n global mode_switch_flag\n\n mode, switch = check_mode_switch(data)\n if not mode_switch_flag:\n if switch == 1:\n mode_switch_flag = True\n if mode == 1:\n btComm.setMode(MODE.DIRECT_CONTROL)\n elif mode == 3:\n btComm.setMode(MODE.APPLICATION)\n else:\n if switch == 0:\n mode_switch_flag = False\n\n wrapper_to_predictor_queue.put(data)\n\n\ndef check_mode_switch(data):\n mode = data[19]\n #button for mode switch\n #in the dataframe bytes 14 to 17 are mapped to the buttons of the T-Skin\n #byte 14 is button 1 ... byte 17 is button 4\n button = data[17]\n return mode, button\n\n\n########################################################################\n############### execution starts here ###########################\n########################################################################\n\n\n# create communication class\nbtComm = BTCommunication()\n\nversion = btComm.getVersion()\nprint(\"comm version: {}\".format(version))\n\n\n#set Call Back for data stream from TSkin\nbtComm.setStreamCallBack(myStreamCB)\n\n\n#connect to TACTI\nbtComm.stayConnected()\ntime.sleep(3)\n\n\n\n#some inits\nbtComm.setMode(MODE.APPLICATION)\nbtComm.setDimension(DIMENSION.DIM_3D)\nbtComm.setUsage(USAGE.TSKIN_RIGHT)\nbtComm.setGNorm(GNORM.NORM)\n#btComm.setThresholds(10, 20, 30)\nbtComm.setLed(LED_MODE.FAST_BLINK, LED_COLOR.RED)\nbtComm.startSendingData()\n\n#create and start processes for the predictor and the application\npred = Predictor(wrapper_to_predictor_queue, predictor_to_application_queue, direct_control_queue, earlyPredictors, validationPredictors, class_names)\npred.start()\napp = Application(predictor_to_application_queue, direct_control_queue)\napp.start()\n\n#main loop\ndone = False\nwhile not done:\n \n #automatically reconnect in case of connection lost\n #In case of reconnection, previous settings (mode, dimension, ...) 
\n #are automatically re-applied\n #btComm.stayConnected()\n pass","repo_name":"MirkoMettendorf/SoftwareProjekt_DroneControl","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42238807547","text":"import math\nimport random\nimport glob\nimport click\nimport ffcv\nfrom ffcv.transforms import Convert, ToTensor, ToTorchImage\nfrom ffcv.fields.decoders import NDArrayDecoder, IntDecoder\n#import timm\nfrom torchvision.models import vgg11\nimport torch\nimport torch.nn as nn\nfrom torch.optim.lr_scheduler import OneCycleLR, CyclicLR\nfrom torch.nn.functional import mse_loss, log_softmax, tanh, relu\nfrom accelerate import Accelerator\nfrom accelerate.utils import LoggerType\nfrom tqdm import tqdm\nimport numpy as np\nimport neptune\n\n\nclass ResBlock(nn.Module):\n def __init__(self, inplanes=256, planes=256, stride=1, downsample=None):\n super().__init__()\n self.conv1 = nn.Conv2d(\n inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.bn2 = nn.BatchNorm2d(planes)\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = relu(self.bn1(out))\n out = self.conv2(out)\n out = self.bn2(out)\n out += residual\n out = relu(out)\n return out\n\n\nclass ChessModel(torch.nn.Module):\n N_RES_BLOCKS = 19\n\n def __init__(self):\n super().__init__()\n\n # 8 boards (14 channels each) + meta (7 channels)\n self.conv_block = nn.Sequential(\n nn.Conv2d(14 * 8 + 7, 256, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(inplace=True),\n )\n\n self.res_blocks = nn.ModuleList([ResBlock() for _ in range(self.N_RES_BLOCKS)] )\n\n self.value_head = nn.Sequential(\n nn.Conv2d(256, 1, kernel_size=1, bias=False),\n nn.BatchNorm2d(1),\n nn.Flatten(),\n nn.Linear(64, 64),\n nn.LeakyReLU(inplace=True),\n nn.Linear(64, 1),\n nn.Tanh(),\n )\n\n self.policy_head = nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=1, bias=False),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(inplace=True),\n nn.Flatten(),\n nn.Linear(8*8*128, 8*8*73),\n nn.LogSoftmax(dim=1),\n )\n\n def forward(self, inp):\n x = self.conv_block(inp)\n\n for block in self.res_blocks:\n x = block(x)\n\n v1 = self.policy_head(x)\n v2 = self.value_head(x)\n return v1, v2\n\n\ndef get_grad_l2(model):\n l2 = []\n for n, p in model.named_parameters():\n v = torch.norm(p.grad, p=2).detach().cpu().item()\n\n #if math.isnan(v) or math.isinf(v):\n # vp = torch.norm(p, p=2).detach().cpu().item()\n # print(\"grad l2 nan: \", n, v, vp)\n\n l2.append(v)\n\n return np.mean(l2)\n\n\n@click.command()\n@click.option(\"--init-lr\", default=0.001)\n@click.option(\"--num-epochs\", default=30)\n@click.option(\"--model-ver\", default=0)\n@click.option(\"--model-prefix\", default=\"v\")\n@click.option(\"--load-prev-ckpt\", default=True)\ndef main(init_lr, num_epochs, model_ver, model_prefix, load_prev_ckpt):\n\n run = neptune.init_run(project=\"jiasen/smart-chess\")\n run[\"parameters\"] = {\"init_lr\": init_lr, \"num_epochs\": num_epochs, \"model_ver\": model_ver}\n\n model = ChessModel()\n\n dataloaders = []\n\n datasets = glob.glob(f\"{model_prefix}{model_ver}/dataset/**/*.beton\", recursive=True)\n\n for dataset in datasets:\n dl = ffcv.Loader(\n dataset,\n batch_size=8,\n order=ffcv.loader.OrderOption.RANDOM,\n pipelines={\n \"board\": 
[NDArrayDecoder(), ToTensor(), ToTorchImage(), Convert(torch.float32)],\n \"move\": [NDArrayDecoder(), ToTensor()],\n \"outcome\": [IntDecoder(), ToTensor(), Convert(torch.float32)],\n }\n )\n dataloaders.append(dl)\n\n num_total = sum(len(dl) for dl in dataloaders)\n\n #optimizer = torch.optim.AdamW(params=model.parameters(), lr=init_lr, eps=1e-4, weight_decay=1e-3)\n optimizer = torch.optim.SGD(params=model.parameters(), lr=1e-4, weight_decay=1e-4)\n\n lr_scheduler = OneCycleLR(\n optimizer=optimizer,\n max_lr=init_lr,\n epochs=num_epochs,\n steps_per_epoch=num_total\n )\n\n accelerator = Accelerator(\n mixed_precision=\"fp16\",\n #dynamo_backend=\"INDUCTOR\",\n )\n\n model = accelerator.prepare(model)\n\n if model_ver >= 1 and load_prev_ckpt:\n print(\"Loading previous model.\")\n accelerator.load_state(f\"{model_prefix}{model_ver-1}/checkpoint\")\n\n optimizer, lr_scheduler = accelerator.prepare(\n optimizer, lr_scheduler\n )\n\n dataloaders = list(accelerator.prepare(*dataloaders))\n\n step = 0\n for epoch in range(num_epochs):\n\n model.train()\n accelerator.print(f\"epoch {epoch}\")\n\n with tqdm(total=num_total) as pbar:\n random.shuffle(dataloaders)\n for dl in dataloaders:\n for batch in dl:\n batch = [v.to(accelerator.device) for v in batch]\n output_distr, output_award = model(batch[0])\n\n # loss computed in float32\n loss1 = - torch.sum(output_distr * batch[1], dim=1).mean()\n loss2 = mse_loss(output_award, batch[2])\n loss = loss1 + loss2\n\n accelerator.backward(loss)\n\n #if accelerator.sync_gradients:\n accelerator.clip_grad_norm_(model.parameters(), 10)\n\n grad_l2 = get_grad_l2(model)\n if math.isnan(grad_l2) or math.isinf(grad_l2):\n grad_l2 = 0\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n if accelerator.optimizer_step_was_skipped:\n accelerator.print(\"Skipping one step\")\n\n if accelerator.is_main_process:\n loss1 = loss1.detach().cpu().item()\n loss2 = loss2.detach().cpu().item()\n lr = lr_scheduler.get_last_lr()[0]\n scale = optimizer.scaler.get_scale()\n\n run[\"training/lr\"].log(lr)\n run[\"training/loss1\"].log(loss1)\n run[\"training/loss2\"].log(loss2)\n run[\"training/l2\"].log(grad_l2)\n run[\"training/scale\"].log(scale)\n\n step+=1\n pbar.update()\n\n l2 = 0\n for p in model.parameters():\n l2 += torch.norm(p, p=2)\n l2 = l2.item()\n print(f\"L2: {l2}\")\n run[\"training/l2\"].append(l2)\n\n accelerator.save_state(output_dir=f\"{model_prefix}{model_ver}/checkpoint\")\n run.stop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pierric/smart-chess","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74001710068","text":"\"\"\"\nmodxlib - Utilites used by the prototype CTPS Model Data Explorer for TDM19.\n\nThe uilities fall into the following categories:\n0. Version identification\n1. Trip Table management\n2. TAZ \"shapefile\" management\n3. Utilities for the transit mode\n4. Utilities for working with highway assignment data\n5. Utilities for working with \"skims\"\n6. 
Utilities for working with dataframes and geodataframes\n\nThis list will be continued.\n\"\"\"\n# modxlib.py - Python module implementing CTPS 'modxlib'\n#\n# Author: Ben Krepp (bkrepp@ctps.org)\n#\n\nimport csv\nimport os\nimport glob\nfrom functools import reduce\nimport numpy as np\nimport pandas as pd\nimport geopandas as gp\nimport openmatrix as omx\nfrom dbfread import DBF\nimport pydash\n\n###############################################################################\n#\n# Section 0: Version identification\n#\n_version = \"0.4.0\"\ndef get_version():\n return _version\n# end_def\n\n\n###############################################################################\n#\n# Section 1: Trip table management\n#\nclass TripTableMgr():\n \"\"\" \n Class for trip table utilities for TDM19\n \"\"\"\n _all_time_periods = [ 'am', 'md', 'pm', 'nt' ]\n _auto_modes = [ 'SOV', 'HOV' ]\n _truck_modes = [ 'Heavy_Truck', 'Heavy_Truck_HazMat', 'Medium_Truck', 'Medium_Truck_HazMat', 'Light_Truck' ]\n _nm_modes = [ 'Walk', 'Bike' ]\n _transit_modes = [ 'DAT_Boat', 'DET_Boat', 'DAT_CR', 'DET_CR', 'DAT_LB', 'DET_LB', 'DAT_RT', 'DET_RT', 'WAT' ]\n _all_modes = _auto_modes + _truck_modes + _nm_modes + _transit_modes\n \n def open_trip_tables(self, scenario_dir):\n \"\"\"\n Function: open_trip_tables - TDM19 implementation\n\n Summary: Given a directory containing the trip tables in OMX format for the \n four daily time periods used by the TDM19 model, open them and return \n a dictionary with the keys 'am', 'md', 'pm', and 'nt' whose\n value is the corresponding open OMX file.\n\n Args: scenario_dir: directory containing TDM19 output for a given scenario;\n the OMX-format trip-tables are found in the 'out' subdirectory \n\n Returns: A dictionary with the keys 'am', 'md', 'pm', and 'nt' whose\n value is the corresponding open OMX file.\n \n Raises: N/A\n \"\"\"\n tt_dir = scenario_dir + '/out/'\n tt_am = tt_dir + 'AfterSC_Final_AM_Tables.omx'\n tt_md = tt_dir + 'AfterSC_Final_MD_Tables.omx'\n tt_pm = tt_dir + 'AfterSC_Final_PM_Tables.omx'\n tt_nt = tt_dir + 'AfterSC_Final_NT_Tables.omx'\n tt_omxs = { 'am' : omx.open_file(tt_am,'r'),\n 'md' : omx.open_file(tt_md,'r'),\n 'pm' : omx.open_file(tt_pm,'r'),\n 'nt' : omx.open_file(tt_nt,'r') \n } \n return tt_omxs\n #\n def load_trip_tables(self, tt_omxs, modes=None):\n \"\"\"\n Function: load_trip_tables - TDM19 implementation\n\n Summary: Load the trip tables for all time periods and the specified list of modes from\n open OMX files into NumPy arrays.\n If no list of modes is passed, trip tables for all modes will be returned.\n\n Args: tt_omxs: Dictionary, keyed by time period identifier ('am', 'md', 'pm', and 'nt'),\n each of whose values is the open OMX trip table file for the corresponding\n time period.\n modes: List of modes (strings) or None\n\n Returns: A two-level dictionary (i.e., first level = time period, second level = mode)\n the second level of which contains the trip table, in the form of a NumPy array,\n for the [time_period][mode] in question.\n\n Raises: N/A\n \"\"\"\n if modes == None:\n modes = self._all_modes\n #\n retval = { 'am' : {}, 'md' : {}, 'pm' : {}, 'nt' : {} }\n for period in self._all_time_periods:\n for mode in modes:\n temp = tt_omxs[period][mode]\n retval[period][mode] = np.array(temp)\n # end_for\n # end_for\n return retval\n #\n# class TripTableMgr\n
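\n# Example (sketch, annotation only -- the scenario path below is hypothetical):\n#\n# ttm = TripTableMgr()\n# tt_omxs = ttm.open_trip_tables('my_scenario_dir')\n# tts = ttm.load_trip_tables(tt_omxs, modes=['SOV', 'HOV'])\n# am_sov_trips = tts['am']['SOV'] # NumPy array for the AM SOV trip table\n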
\n\n###############################################################################\n#\n# Section 2: TAZ \"shapefile\" management\n#\n#\nclass TazManager():\n \"\"\"\n class: TazManager\n\n Summary: The class \"TazManager\" provides a set of methods to perform _attribute_ queries\n on an ESRI-format \"Shapefile\" that represents the TAZes in the model region.\n The attributes are read from the Shapefile's .DBF file; other components of\n the Shapefile are ignored.\n \n The Shapefile's .DBF file _must_ contain the following attributes:\n 1. id\n 2. taz\n 3. type - 'I' (internal) or 'E' (external)\n 4. town\n 5. state - state abbreviation, e.g., 'MA'\n 6. town_state - town, state\n 7. mpo - abbreviation of MPO name: \n 8. in_brmpo - 1 (yes) or 0 (no)\n 9. subregion - abbreviation of Boston Region MPO subregion or NULL\n 10. sector - 'analysis sector' as defined by Bill Kuttner.\n Either 'Northeast', 'North', 'Northwest', 'West', 'Southwest',\n 'South', 'Southeast', 'Central' or ''; the empty string ('')\n indicates that the TAZ is outside of the 164 municipalities\n comprising what was once known as the 'CTPS Model Region'.\n\n An object of class TazManager is instantiated by passing in the fully-qualified path\n to a Shapefile to the class constructor. Hence, it is possible to have more than one\n instance of this class active simultaneously, should this be needed.\n\n Note: For all of the above methods listed below that return a \"list of TAZ records\", \n each returned 'TAZ' is a Python 'dict' containing all of the keys (i.e., 'attributes') listed above. \n To convert such a list to a list of _only_ the TAZ IDs, call taz_ids on the list of TAZ records.\n \"\"\"\n _instance = None\n _default_shapefile_dir = r'G:/Data_Resources/modx/canonical_TAZ_shapefile/'\n _default_shapefile_fn = 'candidate_CTPS_TAZ_STATEWIDE_2019.shp'\n # _default_fq_shapefile_fn = _default_base + _default_shapefile_fn\n _taz_table = []\n \n def __init__(self, my_shapefile_dir=None, my_shapefile_fn=None):\n # print('Creating the TazManager object.')\n if my_shapefile_dir == None:\n my_shapefile_dir = self._default_shapefile_dir\n if my_shapefile_fn == None:\n my_shapefile_fn = self._default_shapefile_fn\n #\n my_shapefile_fq_fn = my_shapefile_dir + my_shapefile_fn\n # Derive name of .dbf file \n my_dbffile_fn = my_shapefile_fq_fn.replace('.shp', '.dbf')\n dbf_table = DBF(my_dbffile_fn, load=True)\n for record in dbf_table.records:\n new = {}\n new['id'] = int(record['id'])\n new['taz'] = int(record['taz'])\n new['type'] = record['type']\n new['town'] = record['town']\n new['state'] = record['state']\n new['town_state'] = record['town_state']\n new['mpo'] = record['mpo']\n new['in_brmpo'] = int(record['in_brmpo'])\n new['subregion'] = record['subregion']\n new['sector'] = record['sector']\n self._taz_table.append(new)\n # end_for\n dbf_table.unload()\n print('Number of records read = ' + str(len(self._taz_table)))\n # end_def __init__()\n \n # For debugging during development:\n def _get_tt_item(self, index):\n return self._taz_table[index]\n \n def mpo_to_tazes(self, mpo):\n \"\"\"\n mpo_to_tazes(mpo): Given the name (i.e., abbreviation) of an MPO,\n return a list of the records for the TAZes in it\n \"\"\"\n retval = pydash.collections.filter_(self._taz_table, lambda x: x['mpo'] == mpo)\n return retval\n \n def brmpo_tazes(self):\n \"\"\"\n brmpo_tazes(self) - Return the list of the records for the TAZes in the Boston Region MPO\n \"\"\"\n retval = pydash.collections.filter_(self._taz_table, lambda x: x['in_brmpo'] == 1)\n return retval\n \n def brmpo_town_to_tazes(self, mpo_town):\n \"\"\"\n brmpo_town_to_tazes(town) - Given the name of a town in the Boston Region MPO,\n return a list of the records for the TAZes in it\n \"\"\"\n
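 # NOTE (annotation): the filter below matches the 'town' attribute exactly (case-sensitive).\n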
 retval = pydash.collections.filter_(self._taz_table, lambda x: x['in_brmpo'] == 1 and x['town'] == mpo_town)\n return retval\n \n def brmpo_subregion_to_tazes(self, mpo_subregion):\n \"\"\"\n brmpo_subregion_to_tazes(mpo_subregion) - Given the name (i.e., abbreviation) of a Boston Region MPO subregion,\n return a list of the records for the TAZes in it\n \"\"\"\n # We have to be careful as some towns are in two subregions,\n # and for these the 'subregion' field of the table contains\n # an entry of the form 'SUBREGION_1/SUBREGION_2'.\n retval = []\n if mpo_subregion == 'ICC':\n retval = pydash.collections.filter_(self._taz_table, \n lambda x: x['subregion'].find('ICC') != -1)\n elif mpo_subregion == 'TRIC':\n retval = pydash.collections.filter_(self._taz_table, \n lambda x: x['subregion'].find('TRIC') != -1)\n elif mpo_subregion == 'SWAP':\n retval = pydash.collections.filter_(self._taz_table,\n lambda x: x['subregion'].find('SWAP') != -1)\n else:\n retval = pydash.collections.filter_(self._taz_table, lambda x: x['subregion'] == mpo_subregion)\n # end_if\n return retval\n # end_def brmpo_subregion_to_tazes()\n \n def sector_to_tazes(self, sector):\n \"\"\"\n sector_to_tazes - Given the name of an 'analysis sector', return the list of the records for the TAZes\n in the sector.\n \"\"\"\n retval = pydash.collections.filter_(self._taz_table, lambda x: x['sector'] == sector)\n return retval\n \n # Note: Returns TAZes in town _regardless_ of state.\n def town_to_tazes(self, town):\n \"\"\"\n town_to_tazes(town) - Given the name of a town, return the list of the records for the TAZes in the town.\n Note: If a town with the same name occurs in more than one state, the list of TAZes\n in _all_ such states is returned.\n \"\"\"\n retval = pydash.collections.filter_(self._taz_table, lambda x: x['town'] == town)\n return retval\n \n def town_state_to_tazes(self, town, state):\n \"\"\"\n town_state_to_tazes(town, state) - Given a town and a state abbreviation (e.g., 'MA'),\n return the list of records for the TAZes in the town.\n \"\"\"\n retval = pydash.collections.filter_(self._taz_table, lambda x: x['state'] == state and x['town'] == town)\n return retval\n \n def state_to_tazes(self, state):\n \"\"\"\n state_to_tazes(state) - Given a state abbreviation, return the list of records for the TAZes in the state.\n \"\"\"\n retval = pydash.collections.filter_(self._taz_table, lambda x: x['state'] == state)\n return retval\n \n def taz_ids(self, taz_record_list):\n \"\"\"\n taz_ids(TAZ_record_list) - Given a list of TAZ records, return a list of _only_ the TAZ IDs from those records.\n \"\"\"\n retval = []\n for taz in taz_record_list:\n retval.append(taz['id'])\n # end_for\n return retval\n #\n# end_class TazManager\n
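\n# Example (sketch, annotation only -- town/state values are illustrative):\n#\n# tm = TazManager()\n# boston_tazes = tm.town_state_to_tazes('Boston', 'MA')\n# boston_taz_ids = tm.taz_ids(boston_tazes)\n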
\n\n###############################################################################\n#\n# Section 3: Miscellaneous utilities for the transit mode\n#\n\n# NOTE: The mode-to-metamode mapping machinery is VERY specific to TDM19.\n# It will probably NOT be required at all in TDM23.\n_mode_to_metamode_mapping_table = {\n 1: 'MBTA Bus',\n 2: 'MBTA Bus',\n 3: 'MBTA Bus',\n 4: 'Green Line',\n 5: 'Red Line',\n 6: 'Mattapan Trolley',\n 7: 'Orange Line',\n 8: 'Blue Line',\n 9: 'Commuter Rail',\n 10: 'Ferries',\n 11: 'Ferries',\n 12: 'Silver Line',\n 13: 'Silver Line',\n 14: 'Logan Express',\n 15: 'Logan Shuttle',\n 16: 'MGH and Other Shuttles',\n 17: 'RTA Bus',\n 18: 'RTA Bus',\n 19: 'RTA Bus',\n 20: 'RTA Bus',\n 21: 'RTA Bus',\n 22: 'RTA Bus',\n 23: 'Private Bus',\n 24: 'Private Bus',\n 25: 'Private Bus',\n 26: 'Private Bus',\n 27: 'Private Bus',\n 28: 'Private Bus',\n 29: 'Private Bus',\n 30: 'Private Bus - Yankee',\n 31: 'MBTA Subsidized Bus Routes',\n 32: 'Commuter Rail',\n 33: 'Commuter Rail',\n 34: 'Commuter Rail',\n 35: 'Commuter Rail',\n 36: 'Commuter Rail',\n 37: 'Commuter Rail',\n 38: 'Commuter Rail',\n 39: 'Commuter Rail',\n 40: 'Commuter Rail',\n 41: 'RTA Bus',\n 42: 'RTA Bus',\n 43: 'RTA Bus',\n 70: 'Walk' }\n#\ndef mode_to_metamode(mode):\n \"\"\"\n Function: mode_to_metamode\n\n Summary: Given one of the 50+ transportation \"modes\" supported by the TDM, return its \"meta mode\".\n For example, the model supports 3 different \"modes\" for MBTA bus routes; all three of \n these have the common \"metamode\" of 'MBTA Bus'.\n\n Args: mode: Integer identifying one of the transportation \"modes\" supported by the TDM.\n\n Returns: String representing the input mode's \"metamode.\"\n\n Raises: N/A\n \"\"\"\n retval = 'None'\n if mode in _mode_to_metamode_mapping_table:\n return _mode_to_metamode_mapping_table[mode]\n # end_if\n return retval\n# mode_to_metamode()\n\n\ndef calculate_total_daily_boardings(boardings_by_tod):\n \"\"\"\n Function: calculate_total_daily_boardings\n \n Summary: Calculate the daily total boardings across all time periods.\n This calculation requires a bit of subtlety, because the number of rows in the four\n data frames produced in the calling function is NOT necessarily the same. \n A brute-force approach will not work, generally speaking.\n See comments in the code below for details.\n \n NOTE: This is a helper function for import_transit_assignment, which see.\n \n Args: boardings_by_tod: a dict with the keys 'AM', 'MD', 'PM', and 'NT'\n for which the value of each key is a data frame containing the total\n boardings for the list of routes specified in the input CSV file.\n \n Returns: The input dict (boardings_by_tod) with an additional key 'daily'\n the value of which is a dataframe with the total daily boardings\n for all routes specified in the input CSV across all 4 time periods.\n \n Raises: N/A\n \"\"\"\n am_results = boardings_by_tod['AM']\n md_results = boardings_by_tod['MD']\n pm_results = boardings_by_tod['PM']\n nt_results = boardings_by_tod['NT']\n #\n # Compute the daily sums.\n #\n # Step 1: Join 'am' and 'md' dataframes\n j1 = pd.merge(am_results, md_results, on=['ROUTE', 'STOP'], how='outer', suffixes=('_am', '_md'))\n # Step 1.1 Replace NaN's with 0's\n j1 = j1.fillna(0)\n #\n # Step 1.2 Compute the 'AM' + 'MD' sums\n j1['DirectTransferOff'] = j1['DirectTransferOff_am'] + j1['DirectTransferOff_md']\n j1['DirectTransferOn'] = j1['DirectTransferOn_am'] + j1['DirectTransferOn_md']\n j1['DriveAccessOn'] = j1['DriveAccessOn_am'] + j1['DriveAccessOn_md']\n j1['EgressOff'] = j1['EgressOff_am'] + j1['EgressOff_md']\n j1['Off'] = j1['Off_am'] + j1['Off_md']\n j1['On'] = j1['On_am'] + j1['On_md']\n j1['WalkAccessOn'] = j1['WalkAccessOn_am'] + j1['WalkAccessOn_md'] \n j1['WalkTransferOff'] = j1['WalkTransferOff_am'] + j1['WalkTransferOff_md']\n j1['WalkTransferOn'] = j1['WalkTransferOn_am'] + j1['WalkTransferOn_md']\n #\n # Step 1.3: Drop un-needed columns\n cols_to_drop = ['DirectTransferOff_am', 'DirectTransferOff_md',\n 'DirectTransferOn_am', 'DirectTransferOn_md',\n 'DriveAccessOn_am', 'DriveAccessOn_md',\n 'EgressOff_am','EgressOff_md',\n 'Off_am', 'Off_md',\n 'On_am', 'On_md',\n 'WalkAccessOn_am', 'WalkAccessOn_md',\n 'WalkTransferOff_am', 'WalkTransferOff_md',\n 'WalkTransferOn_am', 'WalkTransferOn_md']\n #\n j1 = j1.drop(columns=cols_to_drop)\n #\n # Step 2: j2 - join 'pm' and 'nt' data frames\n j2 
= pd.merge(pm_results, nt_results, on=['ROUTE', 'STOP'], how='outer', suffixes=('_pm', '_nt'))\n # Step 2.1: Replace NaN's with 0's\n j2 = j2.fillna(0)\n #\n # Step 2.2: Compute the 'PM' + 'NT' sums\n j2['DirectTransferOff'] = j2['DirectTransferOff_pm'] + j2['DirectTransferOff_nt']\n j2['DirectTransferOn'] = j2['DirectTransferOn_pm'] + j2['DirectTransferOn_nt']\n j2['DriveAccessOn'] = j2['DriveAccessOn_pm'] + j2['DriveAccessOn_nt']\n j2['EgressOff'] = j2['EgressOff_pm'] + j2['EgressOff_nt']\n j2['Off'] = j2['Off_pm'] + j2['Off_nt']\n j2['On'] = j2['On_pm'] + j2['On_nt']\n j2['WalkAccessOn'] = j2['WalkAccessOn_pm'] + j2['WalkAccessOn_nt'] \n j2['WalkTransferOff'] = j2['WalkTransferOff_pm'] + j2['WalkTransferOff_nt']\n j2['WalkTransferOn'] = j2['WalkTransferOn_pm'] + j2['WalkTransferOn_nt']\n #\n # Step 2.3: Drop un-needed columns\n cols_to_drop = ['DirectTransferOff_pm', 'DirectTransferOff_nt',\n 'DirectTransferOn_pm', 'DirectTransferOn_nt',\n 'DriveAccessOn_pm', 'DriveAccessOn_nt',\n 'EgressOff_pm','EgressOff_nt',\n 'Off_pm', 'Off_nt',\n 'On_pm', 'On_nt',\n 'WalkAccessOn_pm', 'WalkAccessOn_nt',\n 'WalkTransferOff_pm', 'WalkTransferOff_nt',\n 'WalkTransferOn_pm', 'WalkTransferOn_nt'\n ]\n j2 = j2.drop(columns=cols_to_drop)\n #\n # Step 3: Join \"j1\" and \"j2\" to produce a dataframe with the daily totals\n daily_df = pd.merge(j1, j2, on=['ROUTE', 'STOP'], how='outer', suffixes=('_j1', '_j2'))\n # Step 3.1 : Replace any NaN's with 0's. This line _shouldn't_ be needed - just being extra cautious.\n daily_df = daily_df.fillna(0)\n #\n # Step 3.2 : Compute THE daily sums\n daily_df['DirectTransferOff'] = daily_df['DirectTransferOff_j1'] + daily_df['DirectTransferOff_j2']\n daily_df['DirectTransferOn'] = daily_df['DirectTransferOn_j1'] + daily_df['DirectTransferOn_j2']\n daily_df['DriveAccessOn'] = daily_df['DriveAccessOn_j1'] + daily_df['DriveAccessOn_j2']\n daily_df['EgressOff'] = daily_df['EgressOff_j1'] + daily_df['EgressOff_j2']\n daily_df['Off'] = daily_df['Off_j1'] + daily_df['Off_j2']\n daily_df['On'] = daily_df['On_j1'] + daily_df['On_j2']\n daily_df['WalkAccessOn'] = daily_df['WalkAccessOn_j1'] + daily_df['WalkAccessOn_j2'] \n daily_df['WalkTransferOff'] = daily_df['WalkTransferOff_j1'] + daily_df['WalkTransferOff_j2']\n daily_df['WalkTransferOn'] = daily_df['WalkTransferOn_j1'] + daily_df['WalkTransferOn_j2']\n #\n # Step 3.3 : Drop un-needed columns\n cols_to_drop = ['DirectTransferOff_j1', 'DirectTransferOff_j2',\n 'DirectTransferOn_j1', 'DirectTransferOn_j2',\n 'DriveAccessOn_j1', 'DriveAccessOn_j2',\n 'EgressOff_j1','EgressOff_j2',\n 'Off_j1', 'Off_j2',\n 'On_j1', 'On_j2',\n 'WalkAccessOn_j1', 'WalkAccessOn_j2',\n 'WalkTransferOff_j1', 'WalkTransferOff_j2',\n 'WalkTransferOn_j1', 'WalkTransferOn_j2'\n ]\n daily_df = daily_df.drop(columns=cols_to_drop)\n #\n # Finally, we've got the 'daily' total dataframe!\n boardings_by_tod['daily'] = daily_df\n return boardings_by_tod\n# end_def calculate_total_daily_boardings()\n\n# The following function may be applicable to TDM23 as well as to TDM19.\n# This is currently to be determined.\ndef import_transit_assignment(scenario):\n \"\"\"\n Function: import_transit_assignment\n \n NOTE: This method _should_ work equally well for TDM19 and TDM23.\n Under TDM19, the directory containing the output CSVs may (and often does)\n contain multiple CSVs per mode; under TDM23, it will contain only one CSV\n file per mode. 
The code will work equally well in both cases.\n For TDM23, some change to the location of the directory containing the \n CSV files may be required.\n \n Summary: Import transit assignment result CSV files for a given scenario.\n \n 1. Read all CSV files for each time period ('tod'), and calculate the sums for each time period.\n Step 1 can be performed as a brute-force sum across all columns, since the number of rows in\n the CSVs (and thus the dataframes) for any given time period are all the same.\n \n 2. Calculate the daily total across all time periods.\n Step 2 requires a bit of subtlety, because the number of rows in the data frames produced in \n Step 1 is NOT necessarily the same. A brute-force approach will not work, generally speaking.\n See comments in the code below for details.\n NOTE: This step is performed by the helper function calculate_total_daily_boardings.\n \n Args: scenario: path to directory containing transit assignment results in CSV file format\n \n Returns: a dict of the form:\n { 'AM' : dataframe with totals for the AM period,\n 'MD' : dataframe with totals for the MD period,\n 'PM' : dataframe with totals for the PM period,\n 'NT' : dataframe with totals for the NT period,\n 'daily' : dataframe with totals for the entire day\n }\n \n Raises: N/A\n \"\"\"\n base = scenario + r'out/'\n tods = [\"AM\", \"MD\", \"PM\", \"NT\"]\n # At the end of execution of this function, the dictionary variable 'TODsums' will contain all the TOD summed results:\n # one key-value-pair for each 'tod' AND the 'daily' total as well.\n #\n # The dict 'TODsums' is the return value of this function.\n TODsums = { 'AM' : None, 'MD' : None, 'PM' : None, 'NT' : None }\n #\n # Import CSV files and create sum tables for each T-O-D (a.k.a. 'time period').\n for tod in tods:\n # Get full paths to _all_ CSV files for the current t-o-d.\n x = tod + '/' \n fq_csv_fns = glob.glob(os.path.join(base,x,r'*.csv'))\n # \n # 'tablist' : List of all the dataframes created from reading in all the CSV files for the current t-o-d\n tablist = []\n for csv_file in fq_csv_fns:\n # Read CSV file into dataframe, set indices, and append to 'tablist'\n tablist.append(pd.read_csv(csv_file).set_index(['ROUTE','STOP']))\n #\n # Sum the tables for the current TOD\n TODsums[tod] = reduce(lambda a, b: a.add(b, fill_value=0), tablist)\n # end_for over all tod's\n #\n TODsums = calculate_total_daily_boardings(TODsums)\n #\n # Ensure that the ROUTE and STOP columns of each dataframe in TODsums aren't indices.\n for k in TODsums.keys():\n TODsums[k] = TODsums[k].reset_index()\n #\n return TODsums\n# end_def import_transit_assignment()\n
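#\n# Example (sketch, annotation only -- the scenario path is hypothetical and must end in '/'):\n#\n# boardings = import_transit_assignment('path/to/my_scenario/')\n# daily_totals = boardings['daily'] # daily boardings per ROUTE/STOP\n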
#\n# END of Section 3: Utilities for the transit mode\n###############################################################################\n\n\n###############################################################################\n#\n# Section 4: Utilities for the highway mode\n#\n\n\nclass HighwayAssignmentMgr():\n \"\"\" \n Class for highway assignment utilities\n \"\"\"\n def load_highway_assignment(self, scenario):\n \"\"\"\n Method: load_highway_assignment(self, scenario)\n load TDM19 highway assignment data into eight pandas dataframes in a two-level dictionary\n \n Args: scenario - root directory of TDM19 scenario output\n \n Returns: 8 pandas dataframes organized as a two-level dict (first level = time period, \n second level = { 'auto', 'truck' })\n \n Raises: N/A\n \"\"\"\n link_flow_dir = scenario + 'out/'\n \n am_flow_auto_fn = link_flow_dir + 'AM_MMA_LinkFlow.csv'\n am_flow_truck_fn = link_flow_dir + 'AM_MMA_LinkFlow_Trucks.csv'\n md_flow_auto_fn = link_flow_dir + 'MD_MMA_LinkFlow.csv'\n md_flow_truck_fn = link_flow_dir + 'MD_MMA_LinkFlow_Trucks.csv'\n pm_flow_auto_fn = link_flow_dir + 'PM_MMA_LinkFlow.csv'\n pm_flow_truck_fn = link_flow_dir + 'PM_MMA_LinkFlow_Trucks.csv'\n nt_flow_auto_fn = link_flow_dir + 'NT_MMA_LinkFlow.csv'\n nt_flow_truck_fn = link_flow_dir + 'NT_MMA_LinkFlow_Trucks.csv'\n \n # Read each of the above CSV files containing flow data into a dataframe (each file is read only once).\n #\n temp_am_auto_df = pd.read_csv(am_flow_auto_fn, delimiter=',')\n temp_am_truck_df = pd.read_csv(am_flow_truck_fn, delimiter=',')\n #\n temp_md_auto_df = pd.read_csv(md_flow_auto_fn, delimiter=',')\n temp_md_truck_df = pd.read_csv(md_flow_truck_fn, delimiter=',')\n #\n temp_pm_auto_df = pd.read_csv(pm_flow_auto_fn, delimiter=',')\n temp_pm_truck_df = pd.read_csv(pm_flow_truck_fn, delimiter=',')\n #\n temp_nt_auto_df = pd.read_csv(nt_flow_auto_fn, delimiter=',')\n temp_nt_truck_df = pd.read_csv(nt_flow_truck_fn, delimiter=',') \n \n retval = { 'am' : { 'auto' : temp_am_auto_df,\n 'truck': temp_am_truck_df },\n 'md' : { 'auto' : temp_md_auto_df,\n 'truck': temp_md_truck_df },\n 'pm' : { 'auto' : temp_pm_auto_df,\n 'truck': temp_pm_truck_df },\n 'nt' : { 'auto' : temp_nt_auto_df,\n 'truck': temp_nt_truck_df }\n }\n return retval\n #\n# class HighwayAssignmentMgr\n\n\n###############################################################################\n#\n# Section 5: Utilities for working with \"skims\"\n#\nclass SkimMgr():\n _all_time_periods = [ 'am', 'md', 'pm', 'nt' ]\n def open_skims(self, scenario_dir):\n skims_root_dir = scenario_dir + '/out/'\n # Names of time-period-specific skims directories\n skims_dirs = { 'am' : skims_root_dir + r'\Skims_Am_OMX',\n 'md' : skims_root_dir + r'\Skims_Md_OMX',\n 'pm' : skims_root_dir + r'\Skims_Pm_OMX',\n 'nt' : skims_root_dir + r'\Skims_Nt_OMX'\n } \n # Skim OMX files - one set per time period\n skim_components = { 'DAT_BT' : '_DAT_BT_Skim.omx', \n 'DAT_CR' : '_DAT_CR_Skim.omx', \n 'DAT_LB' : '_DAT_LB_Skim.omx', \n 'DAT_RT' : '_DAT_RT_Skim.omx', \n 'SOV' : '_SOV_Skim.omx', \n 'WAT' : '_WAT_Skim.omx' \n } \n \n tps = self._all_time_periods\n # We will only work with the 'AM' skims, for starters.\n # This is the only sample data we have so far.\n tps = [ 'am' ]\n \n # Return value: data structure in which we will store the opened skim OMXs\n skim_omxs = { 'am' : {}, 'md' : {}, 'pm' : {}, 'nt' : {} }\n for tp in tps:\n for sc in skim_components.keys():\n tp_upper = tp.upper()\n fn = skims_dirs[tp] + '\\\' + tp_upper + skim_components[sc]\n # temp = omx.open_file(fn, 'r')\n skim_omxs[tp][sc] = omx.open_file(fn, 'r')\n # end_for\n # end_for\n return skim_omxs\n # end_def open_skims()\n #\n def load_skims(self, skim_omxs):\n # stub for now\n pass\n #\n# class SkimMgr\n
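\n# Example (sketch, annotation only -- assumes the AM skim OMX files exist under <scenario_dir>/out/):\n#\n# skims = SkimMgr().open_skims('path/to/my_scenario')\n# am_sov_skim = skims['am']['SOV']\n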
\n\n###############################################################################\n#\n# Section 6: Dataframe and Geo-dataframe utilities\n#\ndef export_df_to_csv(dataframe, csv_fn, column_list=None):\n \"\"\"\n Function: export_df_to_csv\n\n Summary: Export columns in a dataframe to a CSV file.\n If a list of columns to export isn't specified, export all columns.\n\n Args: dataframe: Pandas dataframe\n csv_fn: Name of CSV file\n column_list: List of columns to export, or None\n\n Returns: N/A\n\n Raises: N/A\n \"\"\"\n if column_list != None:\n dataframe.to_csv(csv_fn, columns=column_list, sep=',')\n else:\n dataframe.to_csv(csv_fn, sep=',')\n# end_def\n\ndef export_gdf_to_geojson(geo_dataframe, geojson_fn):\n geo_dataframe.to_file(geojson_fn, driver='GeoJSON')\n# end_def\n\ndef export_gdf_to_shapefile(geo_dataframe, shapefile_fn):\n geo_dataframe.to_file(shapefile_fn, driver='ESRI Shapefile')\n# end_def\n\ndef bbox_of_gdf(gdf):\n \"\"\"\n Function: bbox_of_gdf\n\n Summary: Return the bounding box of all the features in a geo-dataframe.\n\n Args: gdf: a GeoPandas geo-dataframe\n\n Returns: Bounding box of all the features in the input geodataframe.\n The bounding box is returned as a dictionary with the keys: \n { 'minx', 'miny', 'maxx', 'maxy'}.\n\n Raises: N/A\n \"\"\"\n bounds_tuples = gdf['geometry'].map(lambda x: x.bounds)\n bounds_dicts = []\n for t in bounds_tuples:\n temp = { 'minx' : t[0], 'miny' : t[1], 'maxx' : t[2], 'maxy' : t[3] }\n bounds_dicts.append(temp)\n # end_for\n bounds_df = pd.DataFrame(bounds_dicts)\n minx = bounds_df['minx'].min()\n miny = bounds_df['miny'].min()\n maxx = bounds_df['maxx'].max()\n maxy = bounds_df['maxy'].max()\n retval = { 'minx' : minx, 'miny' : miny, 'maxx' : maxx, 'maxy' : maxy }\n return retval\n# end_def bbox_of_gdf()\n\ndef center_of_bbox(bbox):\n \"\"\"\n Function: center_of_bbox\n\n Summary: Given a geometric \"bounding box\", return its center point. \n\n Args: bbox: Bounding box in the form of a dictionary with the keys { 'minx', 'miny', 'maxx', 'maxy' },\n e.g., one returned by bbox_of_gdf.\n\n Returns: Center point of the bounding box as a dictionary with the keys { 'x' , 'y' }.\n\n Raises: N/A\n \"\"\"\n center_x = bbox['minx'] + (bbox['maxx'] - bbox['minx']) / 2\n center_y = bbox['miny'] + (bbox['maxy'] - bbox['miny']) / 2\n retval = { 'x' : center_x, 'y' : center_y }\n return retval\n# end_def center_of_bbox()\n","repo_name":"CTPSSTAFF/modxlib","sub_path":"modxlib.py","file_name":"modxlib.py","file_ext":"py","file_size_in_byte":30614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24959433757","text":"\"\"\"\r\nfrequencyconfig -- configuration for building OED frequency and currency data\r\n\"\"\"\r\n\r\nimport os\r\nfrom lex import lexconfig\r\n\r\nPIPELINE = [\r\n ('collect_entry_frequencies', 0),\r\n ('collect_all_frequencies', 0),\r\n ('build_csv', 1),\r\n # analysis of the frequency output\r\n ('analyse_frequency_data', 0),\r\n ('compare_with_oec', 0),\r\n ('pos_ratio', 0),\r\n ('rank_entries', 0),\r\n ('ranksample', 0),\r\n # currency\r\n ('raw_currency_data', 0),\r\n ('estimate_currency', 0),\r\n]\r\n\r\nOED_ROOT = lexconfig.OED_DIR\r\nPROJECT_ROOT = os.path.join(OED_ROOT, 'projects', 'frequency')\r\n\r\nFREQUENCY_DIR = lexconfig.OED_FREQUENCY_DIR\r\nFULL_FREQUENCY_DIR = os.path.join(PROJECT_ROOT, 'full_frequency_data')\r\nANALYSIS_DIR = os.path.join(PROJECT_ROOT, 'analysis')\r\nRANKING_FILE = os.path.join(ANALYSIS_DIR, 'ranking.csv')\r\nCURRENCY_DIR = os.path.join(PROJECT_ROOT, 'currency')\r\nCSV_FILE = os.path.join(PROJECT_ROOT, 'oed_frequencies.csv')\r\nOEC_FREQUENCY_FILE = os.path.join(lexconfig.GEL_DIR, 'resources', 'oec',\r\n 'oec_lempos_frequencies.txt')\r\n\r\n# Only entries with last dates between range_start and range_end will\r\n# be evaluated.\r\nRANGE_START = 1700\r\nRANGE_END = 1950\r\n\r\n# Logical currency for a derivative requires the etymon to have either:\r\n# - a last date after range_end;\r\n# - OR a last date after logical_currency_date and size 
(number of\r\n# quotations) greater than logical_currency_size.\r\nLOGICAL_CURRENCY_DATE = 1850\r\nLOGICAL_CURRENCY_SIZE = 20\r\nLOGICAL_CURRENCY_SUFFIXES1 = 'ing|ed|ness'\r\nLOGICAL_CURRENCY_SUFFIXES2 = 'less|able|ly'\r\n","repo_name":"necrop/oedfrequency_build","sub_path":"frequencyconfig.py","file_name":"frequencyconfig.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23735249804","text":"#Ashby-Carson-Assn10\n#This is a program that draws shapes dependant on a user input\nimport turtle\nimport math\nimport random\n\ndef reset():\n turtle.clear()\n\n\n\ndef setup():\n turtle.showturtle()\n turtle.speed(10)\n turtle.setworldcoordinates(-500,-400,500,400)\n\n#pattern.drawRectanglePattern(centerX, centerY, offset, width, height, count, rotation)#\n\n\ndef drawRectanglePattern(centerX, centerY, offset, width, height, count, rotation):\n drawTimes = 0\n theta = 0\n dTheta = (2*math.pi) / count\n\n while drawTimes < count:\n setRandomColor()\n drawTimes += 1\n xBar = offset * math.cos(theta)\n yBar = offset * math.sin(theta)\n turtle.penup()\n turtle.goto(centerX + xBar, centerY + yBar)\n turtle.pendown()\n drawRectangle(width, height, rotation)\n rotation += 360 / count\n theta += dTheta\n\n\ndef drawRectangle(width, height, rotation):\n turtle.seth(rotation)\n turtle.forward(height)\n turtle.right(90)\n turtle.forward(width)\n turtle.right(90)\n turtle.forward(height)\n turtle.right(90)\n turtle.forward(width)\n\n\n\ndef drawCirclePattern(centerX, centerY, offset, count, radius):\n drawTimes = 0\n theta = 0\n rotation = 0\n dTheta = (2*math.pi) / count\n\n while drawTimes < count:\n setRandomColor()\n drawTimes += 1\n xBar = offset * math.cos(theta)\n yBar = offset * math.sin(theta)\n turtle.penup()\n turtle.goto(centerX + xBar, centerY + yBar)\n turtle.pendown()\n drawCircle(radius, rotation)\n rotation += 360 / count\n theta += dTheta\n\ndef drawCircle(radius, rotation):\n turtle.seth(rotation)\n turtle.circle(radius)\n\n\ndef drawSuperPattern(num):\n runAmount = 0\n while runAmount < eval(num):\n whichShape = random.randint(0, 1)\n if whichShape == 0:\n centerX = random.randint(-500,500)\n centerY = random.randint(-400,400)\n offset = random.randint(-150,150)\n width = random.randint(-150,150)\n height = random.randint(-150,150)\n count = random.randint(1,50)\n rotation = random.randint(-150,150)\n drawRectanglePattern(centerX, centerY, offset, width, height, count, rotation)\n else:\n centerX = random.randint(-500,500)\n centerY = random.randint(-400,400)\n offset = random.randint(-150,150)\n count = random.randint(1,50)\n radius = random.randint(-150,150)\n drawCirclePattern(centerX, centerY, offset, count, radius)\n runAmount += 1\n\ndef setRandomColor():\n randColor = random.randint(1,5)\n if randColor == 1:\n turtle.color(\"red\")\n elif randColor == 2:\n turtle.color(\"blue\")\n elif randColor == 3:\n turtle.color(\"green\")\n elif randColor == 4:\n turtle.color(\"cyan\")\n elif randColor == 5:\n turtle.color(\"purple\")\n\n\ndef done():\n turtle.exitonclick()\n","repo_name":"carsonashby/schoolProjects","sub_path":"PycharmProjects/Ashby-Carson-Assn10/pattern.py","file_name":"pattern.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74642417908","text":"from textual.app import App, ComposeResult\nfrom textual.containers import Vertical\nfrom textual.widgets import Header, 
Footer, RadioSet, RadioButton\n\nclass RSExternalChangeApp( App[ None ] ):\n\n CSS = \"\"\"\n Vertical {\n align: center middle;\n }\n\n RadioSet {\n padding: 2;\n }\n \"\"\"\n\n BINDINGS = [\n ( \"space\", \"next\", \"Next Radio Button\" )\n ]\n\n def compose( self ) -> ComposeResult:\n yield Header()\n with Vertical():\n yield RadioSet(\n *[ RadioButton( f\"This is item {n}\", not n, id=f\"rb{n}\" ) for n in range( 10 ) ]\n )\n yield Footer()\n\n def action_next( self ):\n pressed = self.query_one( RadioSet ).pressed_button\n next_rb = int( pressed.id.split( \"rb\" )[ 1 ] )\n if next_rb == 9:\n next_rb = 0\n else:\n next_rb += 1\n self.query_one( f\"#rb{next_rb}\", RadioButton ).toggle()\n\nif __name__ == \"__main__\":\n RSExternalChangeApp().run()\n","repo_name":"davep/textual-sandbox","sub_path":"attic/radio_set_external_change.py","file_name":"radio_set_external_change.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"94"} +{"seq_id":"72651137589","text":"# code for streaming twitter to a mysql db\n# for Python 3 and will support emoji characters (utf8mb4)\n# based on the Python 2 code \n# supplied by http://pythonprogramming.net/twitter-api-streaming-tweets-python-tutorial/\n# for further information on how to use python 3, twitter's api, and \n# mysql together visit: http://miningthedetails.com/blog/python/TwitterStreamsPythonMySQL/\n\n\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport time\nimport json\n\n\n\n# set up connection to db\n# make sure to set charset to 'utf8mb4' to support emoji\ncnx = mysql.connector.connect(user='root', password='',\n host='localhost',\n database='dbname',\n charset = 'utf8mb4')\ncursor=cnx.cursor()\n\n\n#Twitter consumer key, consumer secret, access token, access secret\nckey=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\ncsecret=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\natoken=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nasecret=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\n# set up stream listener\nclass listener(StreamListener):\n\n def on_data(self, data):\n all_data = json.loads(data)\n\t\t# collect all desired data fields \n if 'text' in all_data:\n tweet = all_data[\"text\"]\n created_at = all_data[\"created_at\"]\n retweeted = all_data[\"retweeted\"]\n username = all_data[\"user\"][\"screen_name\"]\n user_tz = all_data[\"user\"][\"time_zone\"]\n user_location = all_data[\"user\"][\"location\"]\n user_coordinates = all_data[\"coordinates\"]\n\t\t \n\t # if coordinates are not present store blank value\n\t # otherwise get the coordinates.coordinates value\n if user_coordinates is None:\n final_coordinates = user_coordinates\n else:\n final_coordinates = str(all_data[\"coordinates\"][\"coordinates\"])\n\t\t \n\t # inser values into the db\n cursor.execute(\"INSERT INTO tableName (created_at, username, tweet, coordinates, userTimeZone, userLocation, retweeted) VALUES (%s,%s,%s,%s,%s,%s,%s)\",\n (created_at, username, tweet, final_coordinates, user_tz, user_location, retweeted))\n cnx.commit()\n \n print((username,tweet))\n \n return True\n else:\n return True\n\n def on_error(self, status):\n print(status)\n\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\n\n# create stream and filter on a searchterm\ntwitterStream = Stream(auth, 
listener())\ntwitterStream.filter(track=[\"searchterm\"],\n languages = [\"en\"], stall_warnings = True)","repo_name":"gistable/gistable","sub_path":"all-gists/e38e0588bf6d8f32e99d/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"} +{"seq_id":"24871476134","text":"#!/usr/bin/env python\n\"\"\"\n test02 of seal class\n \n Getting converged solution on refined grid at larger eccentricity ratios.\n \n - For large eccentricity ratios (e >= 0.5), convergence difficulties\n are encountered for the zeroth-order problem. Notably, the issue seems\n to stem from friction source terms.\n - An alternative treatment of the friction source terms has been implemented\n using a Taylor series linearization to get implicit and explicit\n contributions. However, this alternative scheme does not appear to be any\n more robust than the original, simple implicit/explicit blending.\n - Obtaining a stable, converged solution requires an excessive amount of \n tuning of the momentum and pressure relaxation factors.\n - A simple solution is to increase the blending factor \"uv_src_blend\" to a\n large value to increase the diagonal dominance of the momentum equations.\n This can be used to maintain stability at the cost of a slower convergence\n rate. (Note the very large momentum residuals at the start of the \n computation.)\n - This approach still requires tuning, but the tuning can be limited to\n a single factor, which if made large (1-2 orders-of-magnitude larger\n default value of 1.0) will ensure convergence.\n - Note in this example that the discretization is set to fully upwind,\n \"gamma : 0.0\", this should always be the first setting to change to enhance\n stability.\n\"\"\"\nimport sys\nsys.path.append(\"../../src\")\n\nfrom seal import seal\nfrom seal_funcs import read_parameters\nimport time\n\ndef main():\n \"\"\"\n to call run *.py file containing class as script\n \"\"\"\n # output filename\n param = read_parameters('Kanki01_input.yaml')\n s = seal(param)\n s.solve_zeroth()\n s.plot_res()\n \n \nif __name__ == \"__main__\":\n start = time.time()\n main() \n end = time.time()\n print('runtime [s]')\n print(end-start)\n","repo_name":"trosny/bulk_flow","sub_path":"tests/test02/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"33778278651","text":"from helpers import *\n\ntxt = day_input(25)\n\ndef snafu_to_decimal(s):\n acc = 0\n for c in s:\n x = None\n if c == '=': x = -2\n if c == '-': x = -1\n if c == '0': x = 0\n if c == '1': x = 1\n if c == '2': x = 2\n acc *= 5\n acc += x\n return acc\n\ndef decimal_to_snafu(d):\n acc = d\n chars = []\n while acc != 0:\n r = acc % 5\n acc //= 5\n\n if r >= 3:\n acc += 1\n r -= 5\n\n if r >= 0:\n chars.append(str(r))\n else:\n chars.append('-' if r == -1 else '=')\n\n return ''.join(reversed(chars))\n\nprint(decimal_to_snafu(sum([snafu_to_decimal(s) for s in txt.splitlines()])))\n","repo_name":"toteload/aoc2022","sub_path":"day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"6349613082","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# 名前\n# tit = \"Pod cpu usage\"いらない\nxlab = \"time(s)\"\nylab = \"usage\"\npic = 
\"/home/hikida/promethe/csvfiles/0914_read_300.png\"\ncsv = \"/home/hikida/promethe/csvfiles/read_0914_300.csv\"\n\n# CSVファイルからデータを読み込み\ndata = pd.read_csv(csv)\n\n# x軸、y軸のデータを抽出\nd = 5\nx = data[data.columns.values[0]]\nylist = []\n\"\"\"\nfor col in data.columns.values:\n if col == \"carts\" or col == \"front-end\":\n ylist.append(col) \ny_cols = ylist\n\"\"\"\ny_cols = data.columns.values[1:-1]\n# グラフを描画\nfor y in y_cols:\n Y = data[y]\n plt.plot(x, Y, label=y)\n\n# グラフにタイトルやラベルを設定\n# plt.title(tit)\nplt.xlabel(xlab, fontsize=20)\nplt.ylabel(ylab, fontsize=20)\nplt.legend(fontsize=16, ncol=4, bbox_to_anchor=(0.48, 1.18), loc = 9, frameon=False)\nplt.grid(axis = \"y\")\nplt.tick_params(labelsize=20) # 値の文字フォント\n\nplt.xlim(1, (len(data)-1) * d) # xを1からにすることで原点0を1つにする\nplt.ylim(0, 160000000)\n# グラフを表示\nfig = plt.gcf()\nfig.set_size_inches(8.7, 6) # 黄金比\n\n# グラフを保存\nplt.savefig(pic)\n\n# plt.show()","repo_name":"cdsl-research/Hikida_4_1","sub_path":"csvfiles/graf.py","file_name":"graf.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70963024948","text":"import cadquery as cq\nfrom synthprinter import *\n\n# This project was built successfully.\n# https://fedi.aria.dog/media/71dd4bd2ba8773d143337a12ba7a4d5b9e01c9e783cacab8c24a8f35734db232.jpg\n\nsp = SynthPrinter()\nsp.addKosmoPanel(3)\n\n# Col 1\nsp.addPotentiometer(kcol(1), krow(1), \"all\", \"right\")\nsp.addKnob(kcol(1), krow(1), 21, 16)\nsp.addPotentiometer(kcol(1), krow(2), \"all\", \"right\")\nsp.addKnob(kcol(1), krow(2), 12, 16)\nsp.addSlider(kcol(1), 100, 11.6, 68.4, 4, 50)\nsp.addBigJack(kcol(1), krow(6))\nsp.addBigJack(kcol(1), krow(7))\nsp.engraveLine(kcol(1), krow(1), 180, 155, 2)\nsp.engraveLine(kcol(1), krow(7), 45, 40, 2)\n\n# Col 3\nsp.addPotentiometer(kcol(3), krow(1), \"all\", \"left\")\nsp.addKnob(kcol(3), krow(1), 21, 16)\nsp.addPotentiometer(kcol(3), krow(2), \"all\", \"left\")\nsp.addKnob(kcol(3), krow(2), 12, 16)\nsp.addSlider(kcol(3), 100, 11.6, 68.4, 4, 50)\nsp.addBigJack(kcol(3), krow(6))\nsp.addBigJack(kcol(3), krow(7))\nsp.engraveLine(kcol(3), krow(1), 180, 155, 2)\nsp.engraveLine(kcol(3), krow(7), -45, 40, 2)\n\n# Col 2\nsp.addPotentiometer(kcol(2), krow(4), \"all\", \"left\")\nsp.addKnob(kcol(2), krow(4), 21, 16)\nsp.addBigJack(kcol(2), krow(6))\nsp.addBigJack(kcol(2), krow(7))\nsp.addLed5mm(kcol(1.75), krow(2.75))\nsp.addLed5mm(kcol(2.25), krow(2.75))\nsp.addLed5mm(kcol(1.75), krow(3.25))\nsp.addLed5mm(kcol(2.25), krow(3.25))\nsp.addLed5mm(kcol(2), krow(4.75))\nsp.addLed5mm(kcol(2), krow(5.25))\nsp.engraveLine(kcol(2), krow(4), 180, 75, 2)\nsp.engraveLine(kcol(1.75), krow(2.75), -90, 18, 2)\nsp.engraveLine(kcol(2.25), krow(2.75), 90, 18, 2)\nsp.engraveLine(kcol(1.75), krow(3.25), -90, 18, 2)\nsp.engraveLine(kcol(2.25), krow(3.25), 90, 18, 2)\n\nsp.supportBar(0, 9, 75, 5, 6)\nsp.supportBar(0, 185, 75, 5, 6)\nsp.supportBar(48, 59, 5, 130, 6)\nsp.supportBar(48, 59, -20, 5, 6)\nsp.supportBar(37.5, 9, 5, 50, 6)\n\nsp.render(show_object)\n","repo_name":"AriaSalvatrice/synth-printer","sub_path":"example-08-attenuverter.py","file_name":"example-08-attenuverter.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"94"} +{"seq_id":"26863324433","text":"from dataset import get_data\r\nfrom model import Denoise\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport argparse\r\nfrom imageio import imwrite\r\n\r\n# will change 
hyperparameters later\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--num_epoch', type=int, default=100, help='Number of epochs to run [default: 100]')\r\nparser.add_argument('--batch_size', type=int, default=5, help='Batch size [default: 5]')\r\nparser.add_argument('--patch_size', type=int, default=100, help='Patch size [default: 100]')\r\nparser.add_argument('--learning_rate', type=float, default=0.0001, help='Learning rate [default: 1e-4]')\r\nparser.add_argument('--epsilon', type=float, default=0.00316, help='Epsilon [default: 0.00316]')\r\nparser.add_argument('--log_dir', type=str, default='log', help='Log dir [default: log]')\r\nparser.add_argument('--dataset', type=str, default='images', help='Dataset path [default: images]')\r\nFLAGS = parser.parse_args()\r\n\r\nNUM_EPOCH = FLAGS.num_epoch\r\nBATCH_SIZE = FLAGS.batch_size\r\nPATCH_SIZE = FLAGS.patch_size\r\nLEARNING_RATE = FLAGS.learning_rate\r\nEPSILON = FLAGS.epsilon\r\nLOG_DIR = FLAGS.log_dir\r\nDATASET = FLAGS.dataset\r\n\r\ndef train():\r\n\t'''\r\n\tgeneral structure for training\r\n\t'''\r\n\tinputs, labels = get_data(DATASET)\r\n\tmodel = Denoise()\r\n\toptimizer = tf.keras.optimizers.Adam(LEARNING_RATE)\r\n\t#saver = tf.train.Saver()\r\n\r\n    \r\n\ts = 'images/original_cornell_box.png'\r\n\timwrite(s, inputs)\r\n    \r\n\tinputs = tf.reshape(inputs, (1, 512, 512, 3))/255\r\n\tlabels = tf.reshape(labels, (1, 512, 512, 3))/255\r\n\tfor epoch in range(NUM_EPOCH):\r\n\t\tfor i in range(0, len(inputs) - BATCH_SIZE, BATCH_SIZE):\r\n\t\t\tfor j in range(0, inputs.shape[1] - PATCH_SIZE, PATCH_SIZE):\r\n\t\t\t\tfor k in range(0, inputs.shape[2] - PATCH_SIZE, PATCH_SIZE):\r\n\t\t\t\t\twith tf.GradientTape() as tape:\r\n\t\t\t\t\t\t# slice the batch axis and both spatial axes in a single indexing step\r\n\t\t\t\t\t\tbatch_patch_inputs = inputs[i:i+BATCH_SIZE, j:j+PATCH_SIZE, k:k+PATCH_SIZE]\r\n\t\t\t\t\t\tbatch_patch_labels = labels[i:i+BATCH_SIZE, j:j+PATCH_SIZE, k:k+PATCH_SIZE]\r\n\t\t\t\t\t\tdiffuse, specular = model.call(batch_patch_inputs, batch_patch_labels)\r\n\t\t\t\t\t\tpredictions = EPSILON * diffuse + tf.exp(specular) - 1\r\n\t\t\t\t\t\tloss = model.loss(predictions, batch_patch_labels)\r\n\t\t\t\t\tgradients = tape.gradient(loss, model.trainable_variables)\r\n\t\t\t\t\toptimizer.apply_gradients(zip(gradients, model.trainable_variables))\r\n\t\t\t\t\tprint(\"LOSS\", epoch, \":\", loss)\r\n    \r\n\r\n\t# don't quite remember how to save, need sessions?\r\n\t# save_path = saver.save(, os.path.join(LOG_DIR, \"model.ckpt\"))\r\n\t# print(\"Model saved in file: %s\" % save_path)\r\n\twrite_prediction(inputs, model)\r\n\r\ndef write_prediction(inputs, model):\r\n\tprediction_diff, prediction_spec = model.call(inputs, inputs)\r\n\tprediction = np.array(EPSILON * prediction_diff * 255) # Change when we add specular\r\n\tprediction = prediction.astype(np.uint8)\r\n\tprediction = np.clip(np.reshape(prediction, (512, 512, 3)), 0, 255)\r\n\r\n\ts = 'images/predicted_cornell_box.png'\r\n\timwrite(s, prediction)\r\n\r\n\r\nif __name__ == '__main__':\r\n\ttrain()","repo_name":"bli0428/myKPCN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
{"seq_id":"33739123084","text":"triangle = int(input(\"Enter the height of the triangle: \"))\n\ndef draw_triangle(height):\n    for i in range(height):\n        base = \" \"\n        if i == height -1:\n            base = '_'\n        print(' ' * (height - i), end='')\n        print('/', end='')\n        print(base * i * 2, end='')\n        
print('\\\\')\n\ndraw_triangle(triangle)","repo_name":"ahcene-kadi/runtrack-python","sub_path":"jour01/job23/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"21441836510","text":"import os\n\nimport pytest\n\nfrom .source_test import check_source_example\n\ntestdata_dir = os.path.realpath(\n    os.path.join(\n        os.path.dirname(__file__), '..', '..', 'testdata', 'source', 'mt940'))\n\nexamples = [\n    ('test_basic', '0708271685_09022020_164516.940'),\n]\n\n\n@pytest.mark.parametrize('name,mt940_filename', examples)\ndef test_source(name: str, mt940_filename: str):\n    check_source_example(\n        example_dir=os.path.join(testdata_dir, name),\n        source_spec={\n            'module': 'beancount_import.source.mt940',\n            'filenames': [os.path.join(testdata_dir, mt940_filename)],\n            'mt940_bank': 'ASNB',\n        },\n        replacements=[(testdata_dir, '<testdata>')])\n\n","repo_name":"gpaulissen/beancount-import-copy","sub_path":"beancount_import/source/mt940_test.py","file_name":"mt940_test.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"39518636562","text":"import logging\nimport telegram\nfrom telegram.ext import Updater\nfrom config import *\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import MessageHandler, Filters\nfrom texts import *\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nupdater = Updater(token=TOKEN)\nbot = telegram.Bot(token=TOKEN)\nprint(bot.get_me())\ndispatcher = updater.dispatcher\n\n\ndef start(bot, update):\n    bot.send_message(chat_id=update.message.chat_id, text=MSG_START)\n\n\ndef echo(bot, update):\n    text = update.message.text.lower()\n    if update.message.chat_id != MODERATION_CHAT_ID:\n        bot.send_message(chat_id=MODERATION_CHAT_ID, text=update.message.text)\n        bot.send_message(chat_id=update.message.chat_id, text=MSG_SENT)\n    else:\n        if update.message.reply_to_message is not None:\n            msg = update.message.reply_to_message\n            if text == '+':\n                bot.sendMessage(chat_id=CHANNEL_NAME, text=msg.text)\n\n\ndisp = updater.dispatcher\ndisp.add_handler(CommandHandler(\"start\", start))\necho_handler = MessageHandler(Filters.text, echo)\ndispatcher.add_handler(echo_handler)\nupdater.start_polling()\nupdater.idle()\n","repo_name":"toxydose/valentinesbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"15542131200","text":"\"\"\"\n\"Applied Statistics and Convex Optimization\" homework (9.24):\n(1) Write a function that performs the Wald test;\n(2) Generate 50 and 100 binomial random numbers and plug them into the function to compute p-values.\n\nDebugging notes:\n\n\"\"\"\nimport numpy as np\nfrom scipy import stats\n\n\n# Maximum likelihood estimate of the binomial parameter p\ndef binomial_MLE(sample, N):\n\n    \"\"\"\n    :param sample: sample drawn from the binomial distribution\n    :param N: binomial parameter N (number of trials)\n    :return: maximum likelihood estimate of p\n    \"\"\"\n    return np.mean(sample) / N\n\n\n# Fisher information of the binomial distribution\ndef binomial_fisher(sample, N):\n    \"\"\"\n    :param sample: sample drawn from the binomial distribution\n    :param N: binomial parameter N (number of trials)\n    :return: Fisher information\n    \"\"\"\n    # MLE of p\n    P = binomial_MLE(sample, N)\n\n    return N / (P * (1-P))\n\n\n# Wald test for the binomial parameter p\ndef wald_test(sample, N, theta0):\n    \"\"\"\n    :param sample: sample drawn from the binomial distribution\n    :param N: binomial parameter N (number of trials)\n    :param theta0: hypothesized value of p (known)\n    :return: p-value\n    \"\"\"\n    n = sample.size  # number of samples\n    theta_MLE = binomial_MLE(sample, N)  # MLE of p\n    I_theta = binomial_fisher(sample, N)  # Fisher information of the binomial distribution\n    Tn = n * I_theta * 
(theta_MLE - theta0)**2  # Tn measures how close theta_MLE is to theta0\n    C = Tn  # C is the constant defining the rejection region of H0\n    q_alpha = C  # q_alpha is the (1-alpha) quantile of the chi-square distribution\n    chi2_p = stats.chi2.cdf(q_alpha, 1)  # chi-square CDF with 1 degree of freedom, evaluated at q_alpha\n    p_value = 1 - chi2_p  # convert to the right-tail probability\n\n    if p_value < 0.05:\n        print(\"%d random numbers, p-value %.8f; reject the null hypothesis H0, i.e. p != 0.5\" % (n, p_value))\n    else:\n        print(\"%d random numbers, p-value %.8f; accept the null hypothesis H0, i.e. p = 0.5\" % (n, p_value))\n\n    return p_value\n\n\nif __name__ == \"__main__\":\n\n    sample1 = np.random.binomial(20, 0.45, 50)  # 50 random numbers\n    pval1 = wald_test(sample1, N=20, theta0=0.5)\n\n    sample2 = np.random.binomial(20, 0.45, 100)  # 100 random numbers\n    pval2 = wald_test(sample2, N=20, theta0=0.5)\n","repo_name":"Amgroot-w/DAO_assignment","sub_path":"应用统计与凸优化/ex2 假设检验.py","file_name":"ex2 假设检验.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
{"seq_id":"69837815989","text":"import enum\nfrom decimal import Decimal\n\n# external\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils.encoding import force_str\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\n\n# internal\nfrom shared.util import ISO_3166_CODES, CURRENCIES\nfrom shared.money import MoneyMaker, AbstractMoney\n\n\npostgresql_engine_names = [\n\t'django.db.backends.postgresql',\n\t'django.db.backends.postgresql_psycopg2',\n]\n\nif settings.DATABASES['default']['ENGINE'] in postgresql_engine_names:\n\t# django already provides JSONField for postgres databases\n\tfrom django.contrib.postgres.fields import JSONField as _JSONField\nelse:\n\tfrom jsonfield.fields import JSONField as _JSONField\n\n\nclass JSONField(_JSONField):\n\tdef __init__(self, *args, **kwargs):\n\t\tkwargs.update({'default': dict})\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef deconstruct(self):\n\t\tname, path, args, kwargs = super().deconstruct()\n\t\tdel kwargs['default']\n\t\treturn name, path, args, kwargs\n\n\nclass ChoiceEnumMeta(enum.EnumMeta):\n\tdef __call__(cls, value, *args, **kwargs):\n\t\tif isinstance(value, str):\n\t\t\ttry:\n\t\t\t\tvalue = cls.__members__[value]\n\t\t\texcept KeyError:\n\t\t\t\tpass # let the super method complain\n\t\treturn super().__call__(value, *args, **kwargs)\n\n\tdef __new__(metacls, classname, bases, classdict):\n\t\tlabels = {}\n\t\tfor key in classdict._member_names:\n\t\t\tsource_value = classdict[key]\n\t\t\tif isinstance(source_value, (list, tuple)):\n\t\t\t\ttry:\n\t\t\t\t\tval, labels[key] = source_value\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError(\"Invalid ChoiceEnum member '{}'\".format(key))\n\t\t\telse:\n\t\t\t\tval = source_value\n\t\t\t\tlabels[key] = key.replace(\"_\", \" \").title()\n\t\t\t# Use dict.__setitem__() to suppress defenses against\n\t\t\t# double assignment in enum's classdict\n\t\t\tdict.__setitem__(classdict, key, val)\n\t\tcls = super().__new__(metacls, classname, bases, classdict)\n\t\tfor key, label in labels.items():\n\t\t\tgetattr(cls, key).label = label\n\t\treturn cls\n\n\t@property\n\tdef choices(cls):\n\t\treturn [(k.value, k.label) for k in cls]\n\n\t@property\n\tdef default(cls):\n\t\ttry:\n\t\t\treturn next(iter(cls))\n\t\texcept StopIteration:\n\t\t\treturn None\n\n\nclass ChoiceEnum(enum.Enum, metaclass=ChoiceEnumMeta):\n\t\"\"\"\n\tUtility class to handle choices in Django model and/or form fields.\n\tUsage:\n\n\tclass Color(ChoiceEnum):\n\t\tWHITE = 
0, \"White\"\n\t\tRED = 1, \"Red\"\n\t\tGREEN = 2, \"Green\"\n\t\tBLUE = 3, \"Blue\"\n\n\tgreen = Color.GREEN\n\n\tcolor = forms.ChoiceField(\n\t\tchoices=Color.choices,\n\t\tdefault=Color.default,\n\t)\n\t\"\"\"\n\tdef __str__(self):\n\t\treturn force_str(self.label)\n\n\nclass ChoiceEnumField(models.PositiveSmallIntegerField):\n\tdescription = _(\"Customer recognition state\")\n\n\tdef __init__(self, *args, **kwargs):\n\t\tself.enum_type = kwargs.pop('enum_type', ChoiceEnum) # fallback is required form migrations\n\t\tif not issubclass(self.enum_type, ChoiceEnum):\n\t\t\traise ValueError(\"enum_type must be a subclass of `ChoiceEnum`.\")\n\t\tkwargs.update(choices=self.enum_type.choices)\n\t\tkwargs.setdefault('default', self.enum_type.default)\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef deconstruct(self):\n\t\tname, path, args, kwargs = super().deconstruct()\n\t\tif 'choices' in kwargs:\n\t\t\tdel kwargs['choices']\n\t\tif kwargs['default'] is self.enum_type.default:\n\t\t\tdel kwargs['default']\n\t\telif isinstance(kwargs['default'], self.enum_type):\n\t\t\tkwargs['default'] = kwargs['default'].value\n\t\treturn name, path, args, kwargs\n\n\tdef from_db_value(self, value, expression, connection):\n\t\ttry:\n\t\t\treturn self.enum_type(value)\n\t\texcept ValueError:\n\t\t\treturn value\n\n\tdef get_prep_value(self, state):\n\t\tif isinstance(state, self.enum_type):\n\t\t\treturn state.value\n\t\treturn state\n\n\tdef to_python(self, state):\n\t\treturn self.enum_type(state)\n\n\tdef value_to_string(self, obj):\n\t\tvalue = getattr(obj, self.name, obj)\n\t\tif not isinstance(value, self.enum_type):\n\t\t\traise ValueError(\"Value must be of type {}\".format(self.enum_type))\n\t\treturn value.name\n\n\nclass CountryField(models.CharField):\n\t\"\"\"\n\tThis creates a simple input field to choose a country.\n\t\"\"\"\n\tdef __init__(self, *args, **kwargs):\n\t\tdefaults = {\n\t\t\t'max_length': 3,\n\t\t\t'choices': ISO_3166_CODES,\n\t\t}\n\t\tdefaults.update(kwargs)\n\t\tsuper().__init__(*args, **defaults)\n\n\tdef deconstruct(self):\n\t\tname, path, args, kwargs = super().deconstruct()\n\t\tif kwargs['max_length'] == 3:\n\t\t\tkwargs.pop('max_length')\n\t\tif kwargs['choices'] == ISO_3166_CODES:\n\t\t\tkwargs.pop('choices')\n\t\treturn name, path, args, kwargs\n\n\nclass MoneyFieldWidget(forms.widgets.NumberInput):\n\t\"\"\"\n\tReplacement for NumberInput widget adding the currency suffix.\n\t\"\"\"\n\tdef __init__(self, attrs=None):\n\t\tdefaults = {'style': 'width: 75px; text-align: right'}\n\t\ttry:\n\t\t\tself.currency_code = attrs.pop('currency_code')\n\t\t\tdefaults.update(attrs)\n\t\texcept (KeyError, TypeError):\n\t\t\traise ValueError(\"MoneyFieldWidget must be instantiated with a currency_code.\")\n\t\tsuper().__init__(defaults)\n\n\tdef render(self, name, value, attrs=None, renderer=None):\n\t\tinput_field = super().render(name, value, attrs, renderer)\n\t\treturn format_html('{} {}', input_field, self.currency_code)\n\n\nclass MoneyFormField(forms.DecimalField):\n\t\"\"\"\n\tUse this field type in Django Forms instead of a DecimalField, whenever a input field for\n\tthe Money representation is required.\n\t\"\"\"\n\tdef __init__(self, money_class=None, **kwargs):\n\t\tif money_class is None:\n\t\t\tmoney_class = MoneyMaker()\n\t\tif not issubclass(money_class, AbstractMoney):\n\t\t\traise AttributeError(\"Given `money_class` does not declare a valid money type\")\n\t\tself.Money = money_class\n\t\tif 'widget' not in kwargs:\n\t\t\tkwargs['widget'] = 
MoneyFieldWidget(attrs={'currency_code': money_class.currency})\n\t\tsuper().__init__(**kwargs)\n\n\tdef prepare_value(self, value):\n\t\tif isinstance(value, AbstractMoney):\n\t\t\treturn Decimal(value)\n\t\treturn value\n\n\tdef to_python(self, value):\n\t\tvalue = super().to_python(value)\n\t\treturn self.Money(value)\n\n\tdef validate(self, value):\n\t\tif value.currency != self.Money.currency:\n\t\t\traise ValidationError(\"Can not convert different Money types.\")\n\t\tsuper().validate(Decimal(value))\n\t\treturn value\n\n\nclass MoneyField(models.DecimalField):\n\t\"\"\"\n\tA MoneyField shall be used to store money related amounts in the database,\n\tkeeping track of the used currency. Accessing a model field of type MoneyField,\n\treturns a MoneyIn type.\n\t\"\"\"\n\tdescription = _(\"Money in %(currency_code)s\")\n\n\tdef __init__(self, *args, **kwargs):\n\t\tself.currency_code = kwargs.pop('currency', settings.DEFAULT_CURRENCY)\n\t\tself.Money = MoneyMaker(self.currency_code)\n\t\tdefaults = {\n\t\t\t'max_digits': 30,\n\t\t\t'decimal_places': CURRENCIES[self.currency_code][1],\n\t\t}\n\t\tdefaults.update(kwargs)\n\t\tsuper().__init__(*args, **defaults)\n\n\tdef deconstruct(self):\n\t\tname, path, args, kwargs = super(MoneyField, self).deconstruct()\n\t\tif kwargs['max_digits'] == 30:\n\t\t\tkwargs.pop('max_digits')\n\t\tif kwargs['decimal_places'] == CURRENCIES[self.currency_code][1]:\n\t\t\tkwargs.pop('decimal_places')\n\t\treturn name, path, args, kwargs\n\n\tdef to_python(self, value):\n\t\tif isinstance(value, AbstractMoney):\n\t\t\treturn value\n\t\tif value is None:\n\t\t\treturn self.Money('NaN')\n\t\tvalue = super().to_python(value)\n\t\treturn self.Money(value)\n\n\tdef get_prep_value(self, value):\n\t\t# force to type Decimal by using grandparent super\n\t\tvalue = super(models.DecimalField, self).get_prep_value(value)\n\t\treturn super().to_python(value)\n\n\tdef from_db_value(self, value, expression, connection):\n\t\tif value is None:\n\t\t\treturn\n\t\tif isinstance(value, float):\n\t\t\tvalue = str(value)\n\t\treturn self.Money(value)\n\n\tdef get_db_prep_save(self, value, connection):\n\t\tif isinstance(value, Decimal) and value.is_nan():\n\t\t\treturn None\n\t\treturn super().get_db_prep_save(value, connection)\n\n\tdef get_prep_lookup(self, lookup_type, value):\n\t\tif isinstance(value, AbstractMoney):\n\t\t\tif value.get_currency() != self.Money.get_currency():\n\t\t\t\tmsg = \"This field stores money in {}, but the lookup amount is in {}\"\n\t\t\t\traise ValueError(msg.format(value.get_currency(), self.Money.get_currency()))\n\t\t\tvalue = value.as_decimal()\n\t\tresult = super().get_prep_lookup(lookup_type, value)\n\t\treturn result\n\n\tdef value_to_string(self, obj):\n\t\tvalue = self._get_val_from_obj(obj)\n\t\t# grandparent super\n\t\tvalue = super(models.DecimalField, self).get_prep_value(value)\n\t\treturn self.to_python(value)\n\n\tdef formfield(self, **kwargs):\n\t\twidget = MoneyFieldWidget(attrs={'currency_code': self.Money.currency})\n\t\tdefaults = {'form_class': MoneyFormField, 'widget': widget, 'money_class': self.Money}\n\t\tdefaults.update(**kwargs)\n\t\tformfield = super().formfield(**defaults)\n\t\treturn formfield\n","repo_name":"loudpumpkins/ecommerce","sub_path":"shared/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":8614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39884064992","text":"from django.contrib import admin\n\nfrom .models import AboutUs, 
OurTeam, OurService\n\n\nclass OurTeamInline(admin.StackedInline):\n    model = OurTeam\n    extra = 0\n    classes = ['collapse']\n\n\nclass OurServiceInline(admin.StackedInline):\n    model = OurService\n    extra = 0\n    classes = ['collapse']\n\n\n@admin.register(AboutUs)\nclass AboutUsAdmin(admin.ModelAdmin):\n    inlines = [OurTeamInline, OurServiceInline]\n","repo_name":"mehdi1371-ir/clothing_shop_1","sub_path":"pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"17108450997","text":"#\n# Import the required libraries.\n# socket - Sockets interface for Python.\n# threading - Allows each program to support multiple activities (threads).\n# tkinter - Python interface to the Tcl/Tk graphical environment.\n#\nimport socket\nfrom threading import Thread\nimport tkinter\nfrom tkinter import *\n\n\ndef receive():\n    \"\"\"\n    Receive messages in a loop and display them in a tkinter window.\n\n    :return: None.\n    \"\"\"\n    while True:\n        try:\n            msg = s.recv(1024).decode()  # Receive a message and decode it as a string.\n            msg_list.insert(tkinter.END, msg)  # Insert the message at the end of the window.\n        except Exception:\n            print(\"There is an Error Receiving Message\")\n\ndef send():\n    \"\"\"\n    Get the message from the tkinter input field and send it to the server.\n\n    :return:\n    \"\"\"\n    msg = my_msg.get()  # Get the message from the tkinter input field.\n    my_msg.set(\"\")  # Clear the input field to allow a new message.\n    s.send(bytes(msg, \"utf8\"))  # Send the message encoded in utf-8 to the server.\n\n\nwindow = Tk()\nwindow.title(\"Chat Application\")\nwindow.configure(bg=\"green\")\n\nmessage_frame = Frame(window, height=100, width=100, bg='white')\nmessage_frame.pack()\n\nmy_msg = StringVar()\nmy_msg.set(\"\")\n\nscroll_bar = Scrollbar(message_frame)\nmsg_list = Listbox(message_frame, height=15, width=100, bg=\"white\", yscrollcommand=scroll_bar.set)\nscroll_bar.pack(side=RIGHT, fill=Y)\nmsg_list.pack(side=LEFT, fill=BOTH)\nmsg_list.pack()\n\nentry_field = Entry(window, textvariable=my_msg, fg=\"black\", width=50)\nentry_field.pack()\n\nsend_button = Button(window, text=\"Send\", font=\"Aerial\", fg=\"black\", bg=\"blue\", command=send)\nsend_button.pack()\n\nhost = \"localhost\"\nport = 8080\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host, port))\n\nreceive_thread = Thread(target=receive)\nreceive_thread.start()\n\nmainloop()\n","repo_name":"climao/rcd_resources","sub_path":"python/chat_client.py","file_name":"chat_client.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"23767069869","text":"import os\nfrom typing import Any\nfrom typing import cast\nfrom typing import Dict\nfrom typing import Iterable\n\nfrom ...exceptions import InvalidFile\nfrom ..plugins.util import get_plugins_from_file\n\n\ndef upgrade(baseline: Dict[str, Any]) -> None:\n    for function in [\n        _migrate_filters,\n        _rename_high_entropy_string_arguments,\n        _migrate_custom_plugins,\n    ]:\n        function(baseline)\n\n\ndef _migrate_filters(baseline: Dict[str, Any]) -> None:\n    \"\"\"\n    In v1.0.0, we introduced the idea of `filters`. This consolidated a variety of different\n    false positive filters into a configurable layout. 
To reduce upgrade friction, this will\n contain the default filters used before this version upgrade.\n \"\"\"\n baseline['filters_used'] = [\n {\n 'path': 'detect_secrets.filters.allowlist.is_line_allowlisted',\n },\n {\n 'path': 'detect_secrets.filters.heuristic.is_sequential_string',\n },\n {\n 'path': 'detect_secrets.filters.heuristic.is_potential_uuid',\n },\n {\n 'path': 'detect_secrets.filters.heuristic.is_likely_id_string',\n },\n {\n 'path': 'detect_secrets.filters.heuristic.is_templated_secret',\n },\n {\n 'path': 'detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign',\n },\n {\n 'path': 'detect_secrets.filters.heuristic.is_indirect_reference',\n },\n {\n 'path': 'detect_secrets.filters.common.is_ignored_due_to_verification_policies',\n\n # Hard-code this, just in case VerifiedResult enum values changes.\n # This corresponds to VerifiedResult.UNVERIFIED\n 'min_level': 2,\n },\n ]\n\n if baseline.get('exclude'):\n if baseline['exclude'].get('files'):\n baseline['filters_used'].append({\n 'path': 'detect_secrets.filters.regex.should_exclude_file',\n 'pattern': [\n baseline['exclude']['files'],\n ],\n })\n\n if baseline['exclude'].get('lines'):\n baseline['filters_used'].append({\n 'path': 'detect_secrets.filters.regex.should_exclude_line',\n 'pattern': [\n baseline['exclude']['lines'],\n ],\n })\n\n baseline.pop('exclude')\n\n if baseline.get('word_list'):\n if baseline['word_list']['file']:\n baseline['filters_used'].append({\n 'path': 'detect_secrets.filters.wordlist.should_exclude_secret',\n 'min_length': 3,\n 'file_name': baseline['word_list']['file'],\n 'file_hash': baseline['word_list']['hash'],\n })\n\n baseline.pop('word_list')\n\n\ndef _rename_high_entropy_string_arguments(baseline: Dict[str, Any]) -> None:\n \"\"\"\n During the great refactor for v1.0.0, we also decided to rename these arguments for\n consistency and simplicity.\n \"\"\"\n for plugin in baseline['plugins_used']:\n if plugin['name'] == 'Base64HighEntropyString':\n plugin['limit'] = plugin.pop('base64_limit')\n\n elif plugin['name'] == 'HexHighEntropyString':\n plugin['limit'] = plugin.pop('hex_limit')\n\n # TODO: KeywordDetector?\n\n\ndef _migrate_custom_plugins(baseline: Dict[str, Any]) -> None:\n if 'custom_plugin_paths' not in baseline:\n return\n\n for path in baseline['custom_plugin_paths']:\n try:\n # NOTE: We don't want to use `detect_secrets.core.plugins.initialize.from_file`\n # since we don't want to *initialize* these plugins. That will pollute our global\n # settings object. Instead, we're merely \"parsing\" this file, and applying changes\n # as necessary.\n custom_plugins = cast(Iterable, get_plugins_from_file(path))\n except InvalidFile:\n # Best effort upgrade. 
Don't break if invalid file.\n continue\n\n for plugin in custom_plugins:\n baseline['plugins_used'].append({\n 'name': plugin.__name__,\n 'path': f'file://{os.path.abspath(path)}',\n })\n\n del baseline['custom_plugin_paths']\n","repo_name":"Yelp/detect-secrets","sub_path":"detect_secrets/core/upgrades/v1_0.py","file_name":"v1_0.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":3261,"dataset":"github-code","pt":"94"} +{"seq_id":"35233746189","text":"# -*- coding: utf-8 -*-\n\n# A simple webscraper to get the link to the profile image of the specified Github user\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\n# Prompt the user to enter a Github username\ngithub_user = input('Input a Github username: ')\n\n# Send a request to Github to grab the content of that page\nurl = 'https://github.com/' + github_user\nr = requests.get(url)\nsoup = bs(r.content, 'html.parser') # gets the HTML source code of that page in the URL\nprofile_image = soup.find('img', {'alt': 'Avatar'})['src'] # look for the profile image of the specified Github user\nprint(profile_image) # prints the link to the profile image for the specified Github user","repo_name":"Astralknight532/Projects","sub_path":"FreeCodeCampYT/Python/simplewebscraper.py","file_name":"simplewebscraper.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7276320716","text":"# -*- coding: utf-8 -*-\nimport cv2\n\n\ndef main(args=None, test=False):\n vc = cv2.VideoCapture(\"D:\\\\10059.mp4\")\n if vc.isOpened():\n o, frame = vc.read()\n else:\n o = False\n\n while o:\n ret, frame = vc.read()\n if frame is None:\n break\n if ret:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.imshow('result', gray)\n if cv2.waitKey(10) & 0xFF == 27:\n break\n vc.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wuzhenhua01/douyin-helper","sub_path":"douyi_helper/douyin_helper.py","file_name":"douyin_helper.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22582765040","text":"sys_includes = [\n 'QString',\n 'QByteArray',\n 'QPoint',\n 'QVector',\n 'QColor',\n 'QPolygon',\n]\nincludes = [\n 'enums.h',\n 'transport.h',\n 'Stroke.h',\n]\n\n\ndef join_lines(lines):\n return '\\n '.join(lines)\n\n\nclass Type:\n def __init__(self, name, copy=False):\n self.copy = copy\n self.name = name\n\n def for_param(self):\n if self.copy:\n return self.for_field()\n return 'const %s&' % self.name\n\n def for_field(self):\n return self.name\n\n def decode(self, var_name, array_name, use_setter):\n return '/* TODO: decode %s %s here from %s */' % (self.name, var_name, array_name)\n\n def encode_simple(self, var_name):\n return '/* TODO: encode %s %s here */' % (self.name, var_name)\n\n def encode(self, simple, var_name):\n result = self.encode_simple(var_name)\n if not simple:\n result = 'array.append(%s);' % result\n return result\n\n def __str__(self):\n return self.name\n\n @staticmethod\n def decoded_readonly():\n return False\n\n @staticmethod\n def setter_name(name):\n return name[0].upper() + name[1:]\n\n @staticmethod\n def assign(var_name, value, use_setter):\n if use_setter:\n return 'set%s(%s);' % (Type.setter_name(var_name), value)\n else:\n return '%s = %s;' % (var_name, value)\n\n\nclass StringType(Type):\n def __init__(self, name):\n 
super().__init__(name)\n\n def decode(self, var_name, array_name, use_setter):\n return self.assign(var_name, '%s(%s)' % (self.name, array_name), use_setter)\n\n def encode_simple(self, var_name):\n return '%s.toUtf8()' % var_name\n\n @staticmethod\n def decoded_readonly():\n return True\n\n\nclass StructType(Type):\n def __init__(self, name, fields, getters_setters=True):\n \"\"\"\n :type fields: list[Field]\n \"\"\"\n super().__init__(name)\n self.fields = fields\n self.getters_setters = getters_setters\n\n def decode(self, var_name, array_name, use_setter):\n assert not use_setter, 'use_setter is not supported for struct type %s' % self\n return '\\n '.join('%s.%s' % (var_name, field.decode_lines(array_name, self.getters_setters)) for field in self.fields)\n\n def encode(self, simple, var_name):\n return '\\n '.join(field.encode_lines(False, var_name, self.getters_setters) for field in self.fields)\n\n\nclass ListType(Type):\n def __init__(self, name, item_type):\n \"\"\"\n :type item_type: StructType\n \"\"\"\n super().__init__(name)\n self.item_type = item_type\n\n def decode(self, var_name, array_name, use_setter):\n assert not use_setter, 'use_setter is not supported for list type %s' % self\n counter = Field(tuint32, '%sCount' % var_name)\n s = '\\n '.join([counter.decl_field(), counter.decode_lines(array_name)])\n subdecode = ''.join('''\n %s %s;\n %s\n''' % (f.type.name, f.name, f.decode_lines(array_name)) for f in self.item_type.fields)\n s += '''\n for (int _ = 0; _ < %(counter_name)s; ++_) {%(subdecode)s\n %(var_name)s << %(item_type)s(%(params)s);\n }''' % {\n 'var_name': var_name,\n 'counter_name': counter.name,\n 'item_type': self.item_type.name,\n 'subdecode': subdecode,\n 'params': ', '.join(f.name for f in self.item_type.fields),\n }\n return s\n\n def encode(self, simple, var_name):\n counter = Field(tuint32, 'size')\n s = counter.encode_lines(False, var_name)\n item_name = ('%sItem' % var_name).replace('.', '_')\n subencode = self.item_type.encode(False, item_name).replace('\\n', '\\n ')\n s += '''\n for (%(item_type)s %(item_name)s : %(var_name)s) {\n %(subencode)s\n }''' % {\n 'var_name': var_name,\n 'item_type': self.item_type.for_param(),\n 'item_name': item_name,\n 'subencode': subencode,\n }\n return s\n\n\nclass VaryingIntType(Type):\n def __init__(self, name):\n super().__init__(name, True)\n\n def decode(self, var_name, array_name, use_setter):\n return self.assign(var_name, 'decodeAndShift(%s)' % array_name, use_setter)\n\n def encode_simple(self, var_name):\n return 'encode(static_cast<%s>(%s))' % (self.name, var_name)\n\n\nclass FixedIntType(Type):\n def __init__(self, name):\n super().__init__(name, True)\n\n def decode(self, var_name, array_name, use_setter):\n assignment = self.assign(var_name, 'static_cast<%s>(%s[0])' % (self.name, array_name), use_setter)\n return \"%(assignment)s\\n %(array_name)s = %(array_name)s.mid(1);\" % {\n 'assignment': assignment,\n 'array_name': array_name,\n }\n\n def encode_simple(self, var_name):\n return 'static_cast<%s>(%s)' % (self.name, var_name)\n\n\nclass BoolType(Type):\n def __init__(self, name):\n super().__init__(name, True)\n\n def decode(self, var_name, array_name, use_setter):\n assignment = self.assign(var_name, \"%s[0] != '\\\\0'\" % array_name, use_setter)\n return \"%(assignment)s\\n %(array_name)s = %(array_name)s.mid(1);\" % {\n 'assignment': assignment,\n 'array_name': array_name,\n }\n\n def encode_simple(self, var_name):\n return 'static_cast(%s ? 
1 : 0)' % var_name\n\n\ntuint8 = FixedIntType('uint8_t')\ntbool = BoolType('bool')\ntuint32 = VaryingIntType('uint32_t')\ntstring = StringType('QString')\n\n\nclass Field:\n def __init__(self, field_type, field_name):\n \"\"\"\n :type field_type: Type\n :type field_name: str\n \"\"\"\n self.type = field_type\n self.name = field_name\n\n def __str__(self):\n return '%s %s' % (self.type, self.name)\n\n def decl_ctor(self):\n return '%s %s' % (self.type.for_param(), self.name)\n\n def decl_field(self):\n return '%s %s;' % (self.type.for_field(), self.name)\n\n def ctor_init(self):\n return ', %s(%s)' % (self.name, self.name)\n\n def encode_lines(self, simple, qualifier=None, use_getter=True):\n if qualifier:\n name = '%s.%s' % (qualifier, self.name)\n if use_getter:\n name += '()'\n else:\n name = self.name\n\n return self.type.encode(simple, name)\n\n def decode_lines(self, array_name, use_setter=False):\n return self.type.decode(self.name, array_name, use_setter)\n\n\ntpoint = StructType('QPoint', [Field(tuint32, 'x'), Field(tuint32, 'y')])\ntpointvector = ListType('QVector', tpoint)\ntcolor = StructType('QColor', [Field(tuint8, 'red'), Field(tuint8, 'green'), Field(tuint8, 'blue'), Field(tuint8, 'alpha')])\ntpolygon = ListType('QPolygon', tpoint)\ntstroke = StructType('Stroke', [Field(tcolor, 'color'), Field(tbool, 'isEraser'), Field(tpolygon, 'polygon'), Field(tuint32, \"brushSize\")], False)\ntpointstroke = ListType('QVector', tstroke)\n\n\nclass MsgClass:\n def __init__(self, name, fields):\n \"\"\"\n :type name: str\n :type fields: list[Field]\n \"\"\"\n self.name = name\n self.fields = fields\n\n def cls_caps(self):\n return ''.join(('_'+c) if c.isupper() else c.upper() for c in self.name)[1:]\n\n def gen_encode(self):\n snippets = []\n if len(self.fields) > 1:\n snippets.append('QByteArray array;')\n for f in self.fields:\n snippets.append(f.encode_lines(False))\n snippets.append('return array;')\n elif len(self.fields) == 1:\n snippets.append('return %s;' % self.fields[0].encode_lines(True))\n else:\n snippets.append('return {};')\n return '\\n\\n'.join(' ' + s for s in snippets)\n\n def gen_decode(self):\n snippets = []\n if len(self.fields) > 1 or not self.fields[0].type.decoded_readonly():\n snippets.append('QByteArray m = data;')\n array_name = 'm'\n elif len(self.fields) == 1:\n array_name = 'data'\n for f in self.fields:\n snippets.append(f.decode_lines(array_name))\n return '\\n\\n'.join(' ' + s for s in snippets)\n\n def write_to(self, f):\n if self.fields:\n decl_field = '''\n %s\n''' % '\\n '.join(f.decl_field() for f in self.fields)\n decode_body = '''\n%s\n ''' % self.gen_decode()\n else:\n decl_field = ''\n decode_body = ''\n f.write('''\nstruct %(cls)s : MessageBase{%(decl_field)s\n %(explicit_ctor)s%(cls)s(%(decl_ctor)s)\n : MessageBase(%(cls_caps)s)%(ctor_init)s {}\n\n QByteArray encodeMessageBody() const override {\n%(encode)s\n }\n\n explicit %(cls)s(const QByteArray& data)\n : MessageBase(%(cls_caps)s) {%(decode)s}\n};\n''' % {\n 'cls': self.name,\n 'cls_caps': self.cls_caps(),\n 'decl_field': decl_field,\n 'decl_ctor': ', '.join(f.decl_ctor() for f in self.fields),\n 'explicit_ctor': 'explicit ' if len(self.fields) == 1 else '',\n 'ctor_init': ''.join(f.ctor_init() for f in self.fields),\n 'encode': self.gen_encode(),\n 'decode': decode_body,\n })\n\n\nmsg_classes = [\n MsgClass('StringMessage', [Field(tstring, 'str')]),\n MsgClass('SetClientInfoMessage', [Field(tstring, 'name')]),\n MsgClass('PathMessage', [\n Field(tcolor, 'color'),\n Field(tuint32, 
'layerId'),\n Field(tbool, 'isEraser'),\n Field(tpointvector, 'points'),\n Field(tuint32, 'brushSize'),\n ]),\n MsgClass('AddNewLayerMessage', [Field(tuint32, 'layerId'), Field(tstring, 'layerName')]),\n MsgClass('RenameLayerMessage', [Field(tuint32, 'layerId'), Field(tstring, 'layerName')]),\n MsgClass('MoveLayerMessage', [Field(tuint32, 'layerId'), Field(tuint32, 'newPos')]),\n MsgClass('RemoveLayerMessage', [Field(tuint32, 'layerId')]),\n MsgClass('RemoveLastStrokeMessage', [Field(tuint32, 'layerId')]),\n MsgClass('CopyLayerMessage', [Field(tuint32, 'fromUserId'), Field(tuint32, 'fromLayerId'), Field(tuint32, 'toLayerId')]),\n MsgClass('LayerContentsMessage', [Field(tuint32, 'layerId'), Field(tpointstroke, 'strokes'), Field(tstring, 'layerName')]),\n]\n\n\ndef main():\n with open('messages.h', 'w') as f:\n f.write('#pragma once\\n\\n')\n f.write('\\n'.join('#include <%s>' % inc for inc in sys_includes))\n f.write('\\n\\n')\n f.write('\\n'.join('#include \"%s\"' % inc for inc in includes))\n\n f.write('\\n')\n for cls in msg_classes:\n cls.write_to(f)\n\n with open('enums.h', 'w') as f:\n names = [msg.cls_caps() for msg in msg_classes]\n names[0] += ' = 1'\n f.write('''#pragma once\n#include \n\nenum MessageType {\n %s,\n};\n\nconst char* getMessageTypeString(uint32_t type);\n''' % ',\\n '.join(names))\n\n with open('enums.cpp', 'w') as f:\n f.write('''#include \"enums.h\"\n\nconst char* getMessageTypeString(uint32_t type) {\n switch (type) {\n %s\n }\n return \"\";\n}\n''' % '\\n '.join('case %s: return \"%s\";' % (msg.cls_caps(), msg.name) for msg in msg_classes))\n\n with open('MessageHandler.h', 'w') as f:\n f.write('''#pragma once\n\n#include \n\nclass QByteArray;\n\n%(cls_fwd)s\n\nclass MessageHandler {\npublic:\n void handle(uint32_t user, uint32_t messageType, const QByteArray& message);\n\nprotected:\n %(virt)s\n\n virtual ~MessageHandler() = default;\n};\n''' % {\n 'cls_fwd': '\\n'.join('class %s;' % msg.name for msg in msg_classes),\n 'virt': '\\n '.join('virtual void handle%(name)s(uint32_t user, const %(name)s& m) = 0;' % {'name': msg.name} for msg in msg_classes),\n })\n\n with open('MessageHandler.cpp', 'w') as f:\n f.write('''#include \"MessageHandler.h\"\n\n#include \"enums.h\"\n#include \"messages.h\"\n\nvoid MessageHandler::handle(uint32_t user, uint32_t messageType, const QByteArray& message) {\n switch (messageType) {%s\n }\n}\n''' % ''.join('''\n case %(NAME)s: {\n %(name)s msg(message);\n handle%(name)s(user, msg);\n break;\n }''' % {'name': msg.name, 'NAME': msg.cls_caps()} for msg in msg_classes))\n\nif __name__ == '__main__':\n main()\n","repo_name":"Kracav4ik/pencil_mob","sub_path":"src/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":12300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"34042899323","text":"from django.urls import path\n\nfrom exam_project.web.views import show_index, catalogue, create_product, product_details, product_edit, \\\n product_delete, product_buy, contact_us, edit_contact_us\n\nurlpatterns = (\n path('', show_index, name='show index'),\n path('catalogue/', catalogue, name='catalogue'),\n path('contacts/', contact_us, name='contact us'),\n path('contacts/edit/', edit_contact_us, name='edit contact us'),\n path('product/create/', create_product, name='create product'),\n path('product//details/', product_details, name='product details'),\n path('product//edit/', product_edit, name='product edit'),\n path('product//delete/', product_delete, 
name='product delete'),\n    path('product/<int:pk>/buy/', product_buy, name='product buy'),\n\n)\n","repo_name":"MartinP94O/exam_website_project","sub_path":"exam_project/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"11411509764","text":"from collections import deque\n\n\ndef person_is_seller(name):\n    return name[-1] == 'm'\n\n\ndef search(name, graph_searched):\n    search_queue = deque()\n    search_queue += graph_searched[name]\n    searched = []  # keeps track of people who have already been checked\n    while search_queue:\n        person = search_queue.popleft()\n        if person not in searched:\n            if person_is_seller(person):\n                print('{} is a mango seller!'.format(person))\n                return True\n            else:\n                search_queue += graph[person]\n                searched.append(person)\n    return False\n\n# Build the graph\ngraph = {}\n\ngraph['you'] = ['alice', 'bob', 'claire']\ngraph['bob'] = ['anuj', 'peggy']\ngraph['alice'] = ['peggy']\ngraph['claire'] = ['thom', 'jonny']\ngraph['anuj'] = []\ngraph['peggy'] = []\ngraph['thom'] = []\ngraph['jonny'] = []\n\nif not search('you', graph):\n    print('no mango seller!')\n","repo_name":"pancerZH/algorithm_with_python","sub_path":"BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"70023013431","text":"from flask import render_template, Blueprint, flash\n\nfrom roc.logger import Logger\nfrom roc.models import User\n\nbp = Blueprint('home', __name__)\n\n\n\"\"\" Home page. \"\"\"\n@bp.route('/')\ndef home():\n    user = User.get_logged_in()\n    info = None if user is None else user.id\n    Logger.log_event('visit-home-page', info=info)\n    return render_template('home.html', user=user)\n","repo_name":"cammatsui/roc","sub_path":"roc/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"37899676146","text":"import torch\nimport torch.nn as nn\n\n\nclass Conv2Encoder(nn.Module):\n    NAME = \"conv2\"\n\n    def __init__(self, height, width, channel, out_dim, bootstrap_model=None):\n        super(Conv2Encoder, self).__init__()\n\n        self.height = height\n        self.channel = channel\n        self.width = width\n\n        self.out_dim = out_dim\n\n        self.model = nn.Sequential(\n            nn.Conv2d(self.channel, 16, (8, 8), 4),\n            nn.LeakyReLU(),\n            nn.Conv2d(16, 32, (4, 4), 2),\n            nn.LeakyReLU(),\n            nn.Conv2d(32, 32, (4, 4), 1),\n            nn.Flatten(),\n            nn.Linear(128, out_dim),\n        )\n\n        if torch.cuda.is_available():\n            self.cuda()\n\n        if bootstrap_model is not None:\n            self.load_state_dict(bootstrap_model.state_dict())\n\n    def forward(self, img):\n        return self.encode(img)\n\n    def encode(self, img):\n        return self.model(img)\n","repo_name":"microsoft/Intrepid","sub_path":"src/model/encoder/conv2_encoder.py","file_name":"conv2_encoder.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"94"}
{"seq_id":"74225920629","text":"from django.shortcuts import render, redirect\nfrom .models import TasksResults\nfrom leitura_arquivos import handling_tasks\nfrom .forms import IdForm\n\ndef entrega_token(request, task_id):\n    return render(request, 'tarefas/task_id.html', {'task_id':task_id})\n\ndef consultar_sequencia(request):\n    if request.method == 'POST':\n        form = IdForm(request.POST)\n        if form.is_valid():\n            task_id = form.cleaned_data['task_id']\n            try:\n                task_id = 
TasksResults.objects.get(id_task=task_id)\n\n                if len(task_id.result) <= 132:\n                    return redirect('ajuda')\n\n                result, phases = handling_tasks(task_id.result)\n\n            except TasksResults.DoesNotExist:\n                message = 'Your Sequence Is Not Aligned Yet.'\n                return render(\n                    request,\n                    'tarefas/consulta_sequencia.html',\n                    {'form':form,\n                    'message_error':message,\n                    'task_id':task_id\n                    }\n                )\n\n            return render(\n                request,\n                'tarefas/resultado_db.html',\n                {\n                    'task_result': result,\n                    'phase_one': phases[0],\n                    'phase_two': phases[1],\n                    'similarity': phases[2]\n                }\n            )\n    else:\n        form = IdForm()\n    return render(request, 'tarefas/consulta_sequencia.html', {'form':form})\n","repo_name":"danielsundfeld/pastar_web_webserver","sub_path":"webserver/apps/tarefas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"30838888825","text":"'''\nLibrary for basic video functions\n'''\n\n# Imports\nimport os\nimport cv2\nimport functools\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nfrom Utils import VideoUtils\n\n# Main Functions\n# Utils Functions\n\n# Image Effect Transistions\ndef EffectTransistion_Apply(I, EFFECT_TREE, TREE_APPLY_FUNC, n_frames=2, PROGRESS_BAR=None):\n    '''\n    Effect Transistion - Apply\n    '''\n    # Init\n    Transistion_Is = []\n    FRAME_PROPS = np.linspace(0.0, 1.0, n_frames)\n    # Loop\n    for i in range(n_frames):\n        ## Update Current Params for each node\n        for nk in EFFECT_TREE[\"nodes\"].keys():\n            CurParams = {\n                pk: EFFECT_TREE[\"nodes\"][nk].parent.effect[\"transistion\"][pk][\"transistion\"][\"func\"](\n                    FRAME_PROPS[i],\n                    EFFECT_TREE[\"nodes\"][nk].parent.effect[\"transistion\"][pk][\"start\"],\n                    EFFECT_TREE[\"nodes\"][nk].parent.effect[\"transistion\"][pk][\"end\"],\n                    **EFFECT_TREE[\"nodes\"][nk].parent.effect[\"transistion\"][pk][\"transistion\"][\"params\"]\n                )\n                for pk in EFFECT_TREE[\"nodes\"][nk].parent.effect[\"transistion\"].keys()\n            }\n            EFFECT_TREE[\"nodes\"][nk].parent.effect[\"params\"] = CurParams\n        ## Apply and Record\n        frame = TREE_APPLY_FUNC(I, EFFECT_TREE=EFFECT_TREE)\n        Transistion_Is.append(frame)\n        ## Update Progress Bar\n        if not PROGRESS_BAR == None: PROGRESS_BAR((i+1)/n_frames)\n    \n    return Transistion_Is\n\n# Transistion Selector Functions\ndef EffectTransistionSelect_Basic(param_start, param_end):\n    '''\n    Effect Transistion Select - Basic\n    '''\n    # Select Based on param type\n    # Non Numeric\n    if param_start is None or param_end is None:\n        return [\"Constant\", \"Switch\"]\n    if isinstance(param_start, str) or isinstance(param_end, str):\n        return [\"Constant\", \"Switch\"]\n    # Numeric\n    if isinstance(param_start, int) and isinstance(param_end, int):\n        return list(TRANSISTION_FUNCS.keys())\n    if isinstance(param_start, float) and isinstance(param_end, float):\n        return list(TRANSISTION_FUNCS.keys())\n    # N-D Array\n    if isinstance(param_start, list) and isinstance(param_end, list):\n        param_start_elem = param_start\n        while isinstance(param_start_elem, list): param_start_elem = param_start_elem[0]\n        param_end_elem = param_end\n        while isinstance(param_end_elem, list): param_end_elem = param_end_elem[0]\n        return EffectTransistionSelect_Basic(param_start_elem, param_end_elem)\n    # Unknown\n    return [\"Constant\", \"Switch\"]\n\n# Transistion Functions\ndef EffectTransistion_Constant(prop, start, end):\n    '''\n    Effect Transistion - Constant\n    '''\n    return start\n\ndef EffectTransistion_Switch(prop, start, end, 
threshold=0.5):\n '''\n Effect Transistion - Switch\n '''\n return start if prop < threshold else end\n\ndef EffectTransistion_Linear(prop, start, end):\n '''\n Effect Transistion - Linear\n '''\n return start + (end-start)*prop\n\ndef EffectTransistion_Sin(prop, start, end, frequency=1.0):\n '''\n Effect Transistion - Sin\n '''\n return start + (end-start)*np.sin(prop*(2*np.pi)*frequency)\n\n# Main Vars\nTRANSISTION_FUNCS = {\n \"Constant\": {\n \"name\": \"Constant\",\n \"func\": EffectTransistion_Constant,\n \"params\": {}\n },\n \"Switch\": {\n \"name\": \"Switch\",\n \"func\": EffectTransistion_Switch,\n \"params\": {\n \"threshold\": 0.5\n }\n },\n \"Linear\": {\n \"name\": \"Linear\",\n \"func\": EffectTransistion_Linear,\n \"params\": {}\n },\n \"Sin\": {\n \"name\": \"Sin\",\n \"func\": EffectTransistion_Sin,\n \"params\": {\n \"frequency\": 1.0\n }\n }\n}","repo_name":"KausikN/VidFX","sub_path":"Utils/EffectTransistionUtils.py","file_name":"EffectTransistionUtils.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"94"} +{"seq_id":"33796627141","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('body', models.TextField()),\n ('score', models.IntegerField(default=0)),\n ('pub_date', models.DateTimeField(auto_now_add=True)),\n ('comment_type', models.IntegerField(default=1, choices=[(1, 'Comment'), (2, 'Review')])),\n ('rating', models.IntegerField(default=None, choices=[(1, 'Horrible'), (2, 'Bad'), (3, 'Okay'), (4, 'Good'), (5, 'Brilliant')], blank=True, null=True)),\n ('author', models.ForeignKey(default='', related_name='comments', to=settings.AUTH_USER_MODEL)),\n ('parent', models.ForeignKey(default=None, null=True, blank=True, to='comments.Comment', related_name='children')),\n ],\n ),\n ]\n","repo_name":"lumenwrites/fictionhub_old","sub_path":"fictionhub/comments/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"94"} +{"seq_id":"29527419595","text":"import libreria\n\n\ndef agregarcelular():\n celular=libreria.pedir_celular(\"Ingrese Marca del telefono:\")\n pais=libreria.pedir_pais(\"Ingrese País de origen del telefono:\")\n costo=libreria.pedir_numero(\"Ingrese costo del telefono:\", 1000, 3000)\n contenido=celular + \"-\" + str(pais) + \"-\" + str(costo) + \"\\n\"\n libreria.guardar_datos(\"info.txt\", contenido, \"a\")\n print(\"Datos guardados\")\n\ndef verDatos():\n datos = libreria.obtener_datos(\"info.txt\")\n if ( datos != \"\"):\n print(datos)\n else:\n print(\"Archivo sin datos\")\n\nopc=0\nmax=3\nwhile(opc != max):\n print(\"######## MENU #######\")\n print(\"1. Agregar Celular\")\n print(\"2. ver datos\")\n print(\"3. 
Salir\")\n print(\"#####################\")\n\n opc=libreria.pedir_numero(\"Ingresar Opcion:\", 1, 3)\n\n if (opc == 1):\n agregarcelular()\n\n if (opc == 2):\n verDatos()\n\n\n#fin_menu\nprint(\"Fin del programa\")\n\n","repo_name":"JhordanRojas/t10_Rojas.Linares","sub_path":"Linares/menu/app4.py","file_name":"app4.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72483639349","text":"#!/usr/bin/env python3\n\"\"\"\nPlotting functions for files output from the OpenABM-Covid19 model\n\nCreated: 30 March 2020\nAuthor: p-robot\n\"\"\"\n\nfrom os.path import join\n\nimport numpy as np, pandas as pd\nfrom pandas.api.types import CategoricalDtype\nfrom scipy.stats import gamma\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\n\n# Colours for plotting the household, work, and random networks\nnetwork_colours = ['#009E73', '#0072B2', '#D55E00']\n\n# Nicely printed labels of event types from the EVENT_TYPES enum \n# as defined in OpenABM-Covid19/src/constant.h\nEVENT_TYPE_STRING = {\n 0: \"Susceptible\",\n 1: \"Presymptomatic (severe)\",\n 2: \"Presymptomatic (mild)\",\n 3: \"Asymptomatic\",\n 4: \"Symptomatic (severe)\",\n 5: \"Symptomatic (mild)\",\n 6: \"Hospitalised\",\n 7: \"ICU\", \n 8: \"Recovering in hospital\",\n 9: \"Recovered\",\n 10: \"Dead\",\n 11: \"Quarantined\", \n 12: \"Quarantined release\",\n 13: \"Test taken\", \n 14: \"Test result\",\n 15: \"Case\", \n 16: \"Trace token release\",\n 17: \"Transition to hospital\",\n 18: \"Transition to critical\",\n 19: \"N event types\"\n}\n\nkey_params = [\n \"n_total\", \n \"n_seed_infection\", \n \"infectious_rate\", \n \"asymptomatic_infectious_factor\", \n \"mild_infectious_factor\",\n \"daily_non_cov_symptoms_rate\"]\n\nsensitivity_analysis_params = [\n \"mean_time_to_symptoms\",\n \"fraction_asymptomatic_0_9\",\n \"fraction_asymptomatic_10_19\",\n \"fraction_asymptomatic_20_29\",\n \"fraction_asymptomatic_30_39\",\n \"fraction_asymptomatic_40_49\",\n \"fraction_asymptomatic_50_59\",\n \"fraction_asymptomatic_60_69\",\n \"fraction_asymptomatic_70_79\",\n \"fraction_asymptomatic_80\",\n \"mild_fraction_0_9\",\n \"mild_fraction_10_19\",\n \"mild_fraction_20_29\",\n \"mild_fraction_30_39\",\n \"mild_fraction_40_49\",\n \"mild_fraction_50_59\",\n \"mild_fraction_60_69\",\n \"mild_fraction_70_79\",\n \"mild_fraction_80\",\n \"daily_non_cov_symptoms_rate\",\n \"relative_susceptibility_0_9\",\n \"relative_susceptibility_10_19\",\n \"relative_susceptibility_20_29\",\n \"relative_susceptibility_30_39\",\n \"relative_susceptibility_40_49\",\n \"relative_susceptibility_50_59\",\n \"relative_susceptibility_60_69\",\n \"relative_susceptibility_70_79\",\n \"relative_susceptibility_80\",\n \"mean_infectious_period\",\n \"relative_transmission_household\"]\n\nasymptomatic_cols = [\n \"fraction_asymptomatic_0_9\",\n \"fraction_asymptomatic_10_19\",\n \"fraction_asymptomatic_20_29\",\n \"fraction_asymptomatic_30_39\",\n \"fraction_asymptomatic_40_49\",\n \"fraction_asymptomatic_50_59\",\n \"fraction_asymptomatic_60_69\",\n \"fraction_asymptomatic_70_79\",\n \"fraction_asymptomatic_80\"]\n\nsusceptibility_cols = [\n \"relative_susceptibility_0_9\",\"relative_susceptibility_10_19\",\n \"relative_susceptibility_20_29\", \"relative_susceptibility_30_39\", \n \"relative_susceptibility_40_49\", \"relative_susceptibility_50_59\",\n \"relative_susceptibility_60_69\", \"relative_susceptibility_70_79\", \n 
\"relative_susceptibility_80\"]\n\nhospitalised_cols = [\n \"hospitalised_fraction_0_9\", \"hospitalised_fraction_10_19\",\n \"hospitalised_fraction_20_29\", \"hospitalised_fraction_30_39\", \n \"hospitalised_fraction_40_49\", \"hospitalised_fraction_50_59\",\n \"hospitalised_fraction_60_69\", \"hospitalised_fraction_70_79\", \n \"hospitalised_fraction_80\"]\n\nfatality_cols = [\n \"fatality_fraction_0_9\", \"fatality_fraction_10_19\", \n \"fatality_fraction_20_29\", \"fatality_fraction_30_39\", \n \"fatality_fraction_40_49\", \"fatality_fraction_50_59\",\n \"fatality_fraction_60_69\", \"fatality_fraction_70_79\", \n \"fatality_fraction_80\"]\n\ncritical_cols = [\n \"critical_fraction_0_9\", \"critical_fraction_10_19\",\n \"critical_fraction_20_29\", \"critical_fraction_30_39\", \n \"critical_fraction_40_49\", \"critical_fraction_50_59\",\n \"critical_fraction_60_69\", \"critical_fraction_70_79\", \n \"critical_fraction_80\"]\n\nmild_cols = [\n \"mild_fraction_0_9\", \"mild_fraction_10_19\",\n \"mild_fraction_20_29\", \"mild_fraction_30_39\", \n \"mild_fraction_40_49\", \"mild_fraction_50_59\",\n \"mild_fraction_60_69\", \"mild_fraction_70_79\", \n \"mild_fraction_80\"]\n\napp_users_cols = [\n \"app_users_fraction_0_9\", \"app_users_fraction_10_19\",\n \"app_users_fraction_20_29\", \"app_users_fraction_30_39\",\n \"app_users_fraction_40_49\", \"app_users_fraction_50_59\",\n \"app_users_fraction_60_69\", \"app_users_fraction_70_79\",\n \"app_users_fraction_80\"]\n\npopulation_cols = [\n \"population_0_9\", \"population_10_19\",\n \"population_20_29\", \"population_30_39\",\n \"population_40_49\", \"population_50_59\", \n \"population_60_69\", \"population_70_79\", \n \"population_80\"]\n\n\ndef get_df_from_params(params, parameter_names):\n \"\"\"\n Return a pandas dataframe of parameter-value pairs from the passed Parameters object\n Mainly used as a helper function for displaying parameter values. 
\n \n Arguments\n ---------\n params : parameters object of class COVID19.model.Parameters\n Parameter object\n parameter_names : list of str\n List of parameter names of interest (column names of parameter input file)\n \n Returns\n -------\n pandas.DataFrame of the parameters of interest\n \"\"\"\n parameter_values = [params.get_param(p) for p in parameter_names]\n df = pd.DataFrame([parameter_values], columns = parameter_names)\n return(df)\n\n\ndef gamma_params(mn, sd):\n \"\"\"\n Return scale and shape parameters from a Gamma distribution from input mean and sd\n \n Arguments\n ---------\n mn : float\n Mean of the gamma distribution\n sd : float\n Standard deviation of the gamma distribution\n \"\"\"\n scale = (sd**2)/mn\n shape = mn/scale\n \n return(shape, scale)\n\n\ndef overlapping_bins(start, stop, window, by):\n \"\"\"Generate overlapping bins\"\"\"\n \n bins = []\n for i in np.arange(start, stop - window + 1, step = by):\n bins.append((i, i + window))\n return(bins)\n\n\ndef get_discrete_viridis_colours(n):\n \"\"\"\n Generate n colours from the viridis colour map\n \n Arguments\n ---------\n n : int\n Number of colours to generate on the viridis colour map\n \n Returns\n -------\n List of length n where each elements is an RGBA list defining a colour\n \"\"\"\n colourmap = cm.get_cmap('viridis', n)\n colours = [colourmap.colors[n - i - 1] for i in range(n)]\n return(colours)\n\n\ndef plot_parameter_assumptions(df_parameters, xlimits = [0, 30], lw = 3):\n \"\"\"\n Plot distributions of mean transition times between compartments in the parameters of the \n OpenABM-Covid19 model\n \n Arguments\n ---------\n df_parameters : pandas.DataFrame\n DataFrame of parameter values as input first input argument to the OpenABM-Covid19 model\n This plotting scripts expects the following columns within this dataframe: \n mean_time_to_hospital\n mean_time_to_critical, sd_time_to_critical\n mean_time_to_symptoms, sd_time_to_symptoms\n mean_infectious_period, sd_infectious_period\n mean_time_to_recover, sd_time_to_recover\n mean_asymptomatic_to_recovery, sd_asymptomatic_to_recovery\n mean_time_hospitalised_recovery, sd_time_hospitalised_recovery\n mean_time_to_death, sd_time_to_death\n mean_time_critical_survive, sd_time_critical_survive\n \n xlimits : list of ints\n Limits of x axis of gamma distributions showing mean transition times\n lw : float\n Line width used in plotting lines of the PDFs\n \n Returns\n -------\n fig, ax : figure and axis handles to the generated figure using matplotlib.pyplot\n \"\"\"\n df = df_parameters # for brevity\n x = np.linspace(xlimits[0], xlimits[1], num = 50)\n \n fig, ax = plt.subplots(nrows = 3, ncols = 3)\n \n ####################################\n # Bernoulli of mean time to hospital\n ####################################\n \n height1 = np.ceil(df.mean_time_to_hospital.values[0]) - df.mean_time_to_hospital.values[0]\n height2 = df.mean_time_to_hospital.values[0] - np.floor(df.mean_time_to_hospital.values[0])\n \n x1 = np.floor(df.mean_time_to_hospital.values[0])\n x2 = np.ceil(df.mean_time_to_hospital.values[0])\n ax[0,0].bar([x1, x2], [height1, height2], color = \"#0072B2\")\n \n ax[0,0].set_ylim([0, 1.0])\n ax[0,0].set_xticks([x1, x2])\n ax[0,0].set_xlabel(\"Time to hospital\\n(from symptoms; days)\")\n ax[0,0].set_ylabel(\"Density\")\n ax[0,0].set_title(\"\")\n ax[0,0].spines[\"top\"].set_visible(False)\n ax[0,0].spines[\"right\"].set_visible(False)\n \n ####################################\n # Gamma of mean time to critical\n 
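# (gamma_params maps a mean/sd pair to gamma shape/scale: scale = sd**2/mean, shape = mean/scale -- e.g. mean 2.0, sd 1.0 gives shape 4.0, scale 0.5)\n 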
####################################\n \n a, b = gamma_params(df.mean_time_to_critical.values, df.sd_time_to_critical.values)\n ax[1,0].plot(x, gamma.pdf(x, a = a, loc = 0, scale = b), linewidth= lw, color = \"#0072B2\")\n ax[1,0].axvline(df.mean_time_to_critical.values, color = \"#D55E00\", \n linestyle = \"dashed\", alpha = 0.7)\n ax[1,0].set_xlabel(\"Time to critical\\n(from hospitalised; days)\")\n ax[1,0].set_title(\"\")\n ax[1,0].spines[\"top\"].set_visible(False)\n ax[1,0].spines[\"right\"].set_visible(False)\n ax[1,0].text(0.9, 0.7, 'mean: {}\\nsd: {}'.format(df.mean_time_to_critical.values[0],\n df.sd_time_to_critical.values[0]), \n ha = 'right', va = 'center', transform = ax[1,0].transAxes)\n \n \n ################################\n # Gamma of mean time to symptoms\n ################################\n \n a, b = gamma_params(df.mean_time_to_symptoms.values, df.sd_time_to_symptoms.values)\n ax[0,1].plot(x, gamma.pdf(x, a = a, loc = 0, scale = b), linewidth= lw, color = \"#0072B2\")\n ax[0,1].axvline(df.mean_time_to_symptoms.values, color = \"#D55E00\", \n linestyle = \"dashed\", alpha = 0.7)\n ax[0,1].set_xlabel(\"Time to symptoms\\n(from presymptomatic; days)\")\n ax[0,1].set_title(\"\")\n ax[0,1].spines[\"top\"].set_visible(False)\n ax[0,1].spines[\"right\"].set_visible(False)\n ax[0,1].text(0.9, 0.7, 'mean: {}\\nsd: {}'.format(df.mean_time_to_symptoms.values[0],\n df.sd_time_to_symptoms.values[0]), \n ha = 'right', va = 'center', transform = ax[0,1].transAxes)\n \n ################################\n # Gamma of mean infectious period\n ################################\n \n a, b = gamma_params(df.mean_infectious_period, df.sd_infectious_period)\n ax[0,2].plot(x, gamma.pdf(x, a = a, loc = 0, scale = b), linewidth= lw, color = \"#0072B2\")\n ax[0,2].axvline(df.mean_infectious_period.values, color = \"#D55E00\", \n linestyle = \"dashed\", alpha = 0.7)\n ax[0,2].set_xlabel(\"Infectious period (days)\")\n ax[0,2].set_title(\"\")\n ax[0,2].spines[\"top\"].set_visible(False)\n ax[0,2].spines[\"right\"].set_visible(False)\n ax[0,2].text(0.9, 0.7, 'mean: {}\\nsd: {}'.format(df.mean_infectious_period.values[0],\n df.sd_infectious_period.values[0]), \n ha = 'right', va = 'center', transform = ax[0,2].transAxes)\n \n ################################\n # Gamma of mean time to recover\n ################################\n \n a, b = gamma_params(df.mean_time_to_recover, df.sd_time_to_recover)\n ax[1,1].plot(x, gamma.pdf(x, a = a, loc = 0, scale = b), linewidth= lw, color = \"#0072B2\")\n ax[1,1].axvline(df.mean_time_to_recover.values, color = \"#D55E00\", \n linestyle = \"dashed\", alpha = 0.7)\n ax[1,1].set_xlabel(\"Time to recover\\n(from hospitalised or critical; days)\")\n ax[1,1].set_title(\"\")\n ax[1,1].spines[\"top\"].set_visible(False)\n ax[1,1].spines[\"right\"].set_visible(False)\n ax[1,1].text(0.9, 0.7, 'mean: {}\\nsd: {}'.format(df.mean_time_to_recover.values[0],\n df.sd_time_to_recover.values[0]), \n ha = 'right', va = 'center', transform = ax[1,1].transAxes)\n \n ########################################\n # Gamma of mean asymptomatic to recovery\n ########################################\n \n a, b = gamma_params(df.mean_asymptomatic_to_recovery, df.sd_asymptomatic_to_recovery)\n ax[2,0].plot(x, gamma.pdf(x, a = a, loc = 0, scale = b), linewidth= lw, color = \"#0072B2\")\n ax[2,0].axvline(df.mean_asymptomatic_to_recovery.values, color = \"#D55E00\", \n linestyle = \"dashed\", alpha = 0.7)\n ax[2,0].set_xlabel(\"Time to recover\\n(from asymptomatic; days)\")\n 
ax[2,0].set_title(\"\")\n ax[2,0].spines[\"top\"].set_visible(False)\n ax[2,0].spines[\"right\"].set_visible(False)\n ax[2,0].text(0.9, 0.7, 'mean: {}\\nsd: {}'.format(df.mean_asymptomatic_to_recovery.values[0],\n df.sd_asymptomatic_to_recovery.values[0]), \n ha = 'right', va = 'center', transform = ax[2,0].transAxes)\n \n ########################################\n # Gamma of mean hospitalised to recovery\n ########################################\n \n a, b = gamma_params(df.mean_time_hospitalised_recovery, df.sd_time_hospitalised_recovery)\n ax[2,1].plot(x, gamma.pdf(x, a = a, loc = 0, scale = b), linewidth= lw, color = \"#0072B2\")\n ax[2,1].axvline(df.mean_time_hospitalised_recovery.values, color = \"#D55E00\", \n linestyle = \"dashed\", alpha = 0.7)\n ax[2,1].set_xlabel(\"Time to recover\\n(from hospitalisation to hospital discharge if not ICU\\nor from ICU discharge to hospital discharge if ICU; days)\")\n ax[2,1].set_title(\"\")\n ax[2,1].spines[\"top\"].set_visible(False)\n ax[2,1].spines[\"right\"].set_visible(False)\n ax[2,1].text(0.9, 0.7, 'mean: {}\\nsd: {}'.format(df.mean_time_hospitalised_recovery.values[0],\n df.sd_time_hospitalised_recovery.values[0]), \n ha = 'right', va = 'center', transform = ax[2,1].transAxes)\n \n #############################\n # Gamma of mean time to death\n #############################\n \n a, b = gamma_params(df.mean_time_to_death.values, df.sd_time_to_death.values)\n ax[1,2].plot(x, gamma.pdf(x, a = a, loc = 0, scale = b), linewidth= lw, c = \"#0072B2\")\n ax[1,2].axvline(df.mean_time_to_death.values, color = \"#D55E00\", \n linestyle = \"dashed\", alpha = 0.7)\n ax[1,2].set_xlabel(\"Time to death\\n(from critical; days)\")\n ax[1,2].set_title(\"\")\n ax[1,2].spines[\"top\"].set_visible(False)\n ax[1,2].spines[\"right\"].set_visible(False)\n ax[1,2].text(0.9, 0.7, 'mean: {}\\nsd: {}'.format(df.mean_time_to_death.values[0],\n df.sd_time_to_death.values[0]), \n ha = 'right', va = 'center', transform = ax[1,2].transAxes)\n \n ########################################\n # Gamma of mean time to survive if critical: FIXME - definitions\n ########################################\n \n a, b = gamma_params(df.mean_time_critical_survive, df.sd_time_critical_survive)\n ax[2,2].plot(x, gamma.pdf(x, a = a, loc = 0, scale = b), linewidth= lw, color = \"#0072B2\")\n ax[2,2].axvline(df.mean_time_critical_survive.values, color = \"#D55E00\", \n linestyle = \"dashed\", alpha = 0.7)\n ax[2,2].set_xlabel(\"Time to survive\\n(if ICU; days)\")\n ax[2,2].set_title(\"\")\n ax[2,2].spines[\"top\"].set_visible(False)\n ax[2,2].spines[\"right\"].set_visible(False)\n ax[2,2].text(0.9, 0.7, 'mean: {}\\nsd: {}'.format(df.mean_time_critical_survive.values[0],\n df.sd_time_critical_survive.values[0]), \n ha = 'right', va = 'center', transform = ax[2,2].transAxes)\n \n plt.subplots_adjust(hspace = 0.5)\n \n return(fig, ax)\n\n\ndef plot_timeseries_curves(df_timeseries, xlimits = None, lw = 3, timevar = \"time\"):\n \"\"\"\n Plot population-level metrics of COVID19 outbreak through time\n \n By default, a figure with four subplots is returned, each with the following plotted:\n 1. Cumulative infected with SARS-CoV-2, cumulative recovered, number quarantined\n 2. Current number asymptomatic, pre-symptomatic, symtompatic, incident cases\n 3. Current number of deaths, hospitalisations, ICU cases\n 4. 
Current number of daily tests used\n \n Arguments\n ---------\n df_timeseries : pandas.DataFrame\n DataFrame of timeseries output from COVID19-IBM (output which is printed to stdout)\n xlimits : list of ints\n Limits of the x-axis (time)\n lw : float\n Line width used in the plots\n timevar : str\n Column name within df_timeseries that defines the x-axis\n \n Returns\n -------\n fig, ax : figure and axis handles to the generated figure using matplotlib.pyplot\n \"\"\"\n \n df = df_timeseries # for brevity, keeping input argument as-is since it's more descriptive\n \n df[\"daily_incidence\"] = np.insert(np.diff(df.total_infected.values), 0, 0) # prepend a zero so day 0 has zero incidence\n \n # List of dictionaries of what to plot in each panel of the plot\n data = [{\n \"total_infected\": {\"label\": \"Total infected\", \"c\": \"red\", \"linestyle\": \"solid\"},\n \"n_recovered\": {\"label\": \"Total recovered\", \"c\": \"#009E73\", \"linestyle\": \"solid\"},\n \"n_quarantine\": {\"label\": \"Number in quarantine\", \"c\": \"grey\", \"linestyle\": \"solid\"}\n },\n {\n \"n_asymptom\": {\"label\": \"Asymptomatic\", \"c\": \"#E69F00\", \"linestyle\": \"solid\"},\n \"n_presymptom\": {\"label\": \"Presymptomatic\", \"c\": \"#CC79A7\", \"linestyle\": \"solid\"},\n \"n_symptoms\": {\"label\": \"Symptomatic\", \"c\": \"#D55E00\", \"linestyle\": \"solid\"},\n \"daily_incidence\": {\"label\": \"Incident cases\", \"c\": \"red\", \"linestyle\": \"solid\"}\n },\n {\n \"n_death\": {\"label\": \"Deaths\", \"c\": \"black\", \"linestyle\": \"dashed\"},\n \"n_hospital\": {\"label\": \"Number hospitalised\", \"c\": \"#56B4E9\", \"linestyle\": \"solid\"},\n \"n_critical\": {\"label\": \"ICU cases\", \"c\": \"#0072B2\", \"linestyle\": \"solid\"}\n },\n {\n \"n_tests\": {\"label\": \"Tests used\", \"c\": \"black\", \"linestyle\": \"solid\"}\n }]\n \n fig, ax = plt.subplots(nrows = len(data))\n \n for i, panel in enumerate(data):\n \n maximums = []\n for var, props in panel.items():\n \n ax[i].plot(df[timevar], df[var], \n c = props[\"c\"], \n linestyle = props[\"linestyle\"],\n linewidth = lw, \n label = props[\"label\"]\n )\n maximums.append(df[var].max())\n \n ax[i].fill_between(df[timevar], 0, np.max(maximums), \n where = (df[\"lockdown\"] == 1), alpha = 0.5)\n \n ax[i].set_ylabel(\"\", size = 18)\n ax[i].spines[\"top\"].set_visible(False)\n ax[i].spines[\"right\"].set_visible(False)\n ax[i].legend(loc = \"center left\")\n ax[i].set_xlim(xlimits)\n \n for tick in ax[i].xaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n \n for tick in ax[i].yaxis.get_major_ticks():\n tick.label.set_fontsize(12)\n \n if i == 3:\n ax[i].set_xlabel(\"Day since infection seeded\", size = 18)\n else:\n ax[i].set_xticks([])\n ax[i].set_xlabel(\"\")\n \n return(fig, ax)\n\n\ndef plot_hist_by_group(df, groupvar, binvar, bins = None, groups = None, \n group_labels = None, group_colours = None, xlimits = None, density = False, \n title = \"\", xlabel = \"\", ylabel = \"\", legend_title = \"\", xticklabels = None):\n \"\"\"\n Histogram with multiple groups, with histogram bars plotted side-by-side for each group\n \n Arguments\n ---------\n df : pandas.DataFrame\n DataFrame of model output\n groupvar : str\n Column name of `df` which stores the grouping variable\n binvar : str\n Column name of `df` over which values will be binned \n bins : int or list\n Either a number of bins or list of bins to use\n groups : list\n Subset of categories in `groupvar` column to plot (defaults to unique values in `groupvar` col)\n group_labels : list\n Labels to use for `groups` categories 
(defaults to `groups` list)\n group_colours : list\n Colours to use for the different `groups` categories (defaults to using the viridis \n colour map with n_groups)\n xlimits : float\n Limit of the x-axis\n density : boolean\n Should histogram be normalised (passed to density arg in np.histogram)\n title, xlabel, ylabel, legend_title : str\n Title, X-axis label, Y-axis label, and legend title respectively\n xticklabels : list of str\n Labels to use for x-ticks\n \n Returns\n -------\n fig, ax : figure and axis handles to the generated figure using matplotlib.pyplot\n \"\"\"\n \n if not groups:\n groups = df[groupvar].unique()\n \n if not group_labels:\n group_labels = groups\n \n n_groups = len(groups)\n \n if not isinstance(bins, list):\n if binvar == \"age_group\":\n bin_list = np.arange(0, bins + 1) - 0.1\n else:\n bin_list = np.arange(bins)\n else:\n bin_list = bins\n \n if group_colours is None:\n group_colours = get_discrete_viridis_colours(n_groups)\n \n width = np.diff(bin_list)[0]/(n_groups + 1)\n \n fig, ax = plt.subplots()\n #ax.grid(which = 'major', axis = 'y', alpha = 0.7, zorder = 0)\n \n for i, g in enumerate(groups):\n heights, b = np.histogram(df.loc[df[groupvar] == g][binvar], bin_list, density = density)\n \n ax.bar(bin_list[:-1] + width*i, heights, width = width, facecolor = group_colours[i],\n label = group_labels[i], edgecolor = \"#0d1a26\", linewidth = 0.5, zorder = 3)\n \n ax.set_xlim([-0.5, np.max(bin_list)])\n\n legend = ax.legend(loc = 'best', borderaxespad = 0, frameon = False, \n prop = {'size': 16}, fontsize = \"x-large\")\n legend.set_title(legend_title, prop = {'size':18})\n \n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n\n ax.set_xlabel(xlabel, size = 18)\n ax.set_ylabel(ylabel, size = 18)\n ax.set_title(title, size = 20)\n \n if xlimits is not None:\n ax.set_xlim(xlimits)\n \n if xticklabels is not None:\n ax.set_xticks(bin_list + n_groups/2*width - width/2.)\n ax.set_xticklabels(xticklabels, size = 14)\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n \n return(fig, ax)\n\n\ndef add_heatmap_to_axes(ax, x, y, bin_list):\n \"\"\"\n Plot heatmap of 2D histogram.\n \n Used for 2D histograms of transmission events across two grouping variables (e.g. 
age)\n \n Arguments\n ---------\n ax : object of matplotlib class `Axes`\n Axis object of where to add a heatmap\n x : np.array\n Array of the x values with which to create a histogram\n y : np.array\n Array of the y values with which to create a histogram\n bin_list : list\n List of bins to use in the histogram\n \n Returns\n -------\n (ax, im)\n ax : object of matplotlib class `Axes`\n updated Axes object\n im : matplotlib.image.AxesImage\n AxesImage object returned from matplotlib.pyplot.imshow\n \"\"\"\n \n array, xbins, ybins = np.histogram2d(x, y, bin_list)\n \n im = ax.imshow(array, origin = \"lower\", aspect = \"equal\", vmin = 0)\n \n return(ax, im)\n\n\ndef adjust_ticks(ax, xtick_fontsize = 12, ytick_fontsize = 12, \n xticklabels = None, yticklabels = None):\n \"\"\"\n Adjust tick font size and ticklabels in a matplotlib.Axes object\n \n Arguments\n ---------\n ax : object of matplotlib class `Axes`\n Axis object of where to adjust tick fonts/labels\n xtick_fontsize, ytick_fontsize : int\n Font size of x-ticks and y-ticks\n xticklabels, yticklabels : list of str\n List of x and y axis tick labels to change\n \n Returns\n -------\n ax : object of matplotlib class `Axes`\n Returns the modified axis object\n \"\"\"\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(xtick_fontsize)\n \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(ytick_fontsize)\n \n if xticklabels is not None:\n ax.set_xticks(np.arange(len(xticklabels)))\n ax.set_xticklabels(xticklabels, rotation = 60)\n \n if yticklabels is not None:\n ax.set_yticks(np.arange(len(yticklabels)))\n ax.set_yticklabels(yticklabels)\n \n return(ax)\n\n\ndef plot_transmission_heatmap_by_age(df, group1var, group2var, bins = None, \n group_labels = None, xlabel = \"\", ylabel = \"\", title = \"\", legend_title = \"\", \n legend_loc = \"right\", xticklabels = None, yticklabels = None, normalise = False, \n vmin = 0, vmax = None):\n \"\"\"\n Plot 2D histogram (as a heatmap) of transmission events by two grouping variables\n (for instance, age group)\n \n \n Returns\n -------\n fig, ax : figure and axis handles to the generated figure using matplotlib.pyplot\n \n \"\"\"\n if not isinstance(bins, list):\n bin_list = np.arange(bins)\n \n fig, ax = plt.subplots()\n \n ax, im = add_heatmap_to_axes(ax, df[group1var].values, df[group2var].values, bin_list)\n \n ax = adjust_ticks(ax, xtick_fontsize = 16, ytick_fontsize = 16, \n xticklabels = xticklabels, yticklabels = yticklabels)\n \n ax.set_xlabel(xlabel, size = 20)\n ax.set_ylabel(ylabel, size = 20)\n ax.set_title(title)\n \n cbar = fig.colorbar(im, fraction = 0.046, pad = 0.04)\n cbar.set_label(legend_title, size = 18)\n cbar.ax.tick_params(labelsize = 14)\n \n return(fig, ax)\n\n\ndef transmission_heatmap_by_age_by_panels(df, \n group1var, group2var, panelvar, bins = None, \n groups = None, group_labels = None,\n panels = None, panel_labels = None,\n xlabel = \"\", ylabel = \"\",\n legend_title = \"\", legend_loc = \"right\",\n xticklabels = None, yticklabels = None,\n normalise = False, title_fontsize = 20,\n spines = False\n ):\n \"\"\"\n Plot subplots of heatmaps of transmissions from one age group to another across another \n categorical variable (panelvar)\n \n Arguments\n ---------\n group1var : str\n Column name of first grouping variable (x-axis in heatmap)\n group2var : str\n Column name of second grouping variable (x-axis in heatmap)\n panelvar\n Column name of variable for making panels\n NBINS\n Number of bins\n group_labels\n normalise\n \n 
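Returns\n -------\n fig, ax : figure and axis handles to the generated figure using matplotlib.pyplot\n 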
\"\"\"\n \n if not isinstance(bins, list):\n bin_list = np.arange(bins)\n \n if not panels: \n panels = np.unique(df[panelvar])\n \n n_panels = len(panels)\n \n if not panel_labels:\n panel_labels = panels\n \n fig, ax = plt.subplots(ncols = n_panels)\n \n ax[0].set_ylabel(ylabel, size = 16)\n \n transmission_arrays = []\n for i, panel in enumerate(panels):\n \n df_sub = df.loc[df[panelvar] == panel]\n \n array, xbins, ybins = np.histogram2d(\n x = df_sub[group1var].values, \n y = df_sub[group2var].values, \n bins = bin_list)\n transmission_arrays.append(array)\n \n vmin_panels = 0\n vmax_panels = np.max(np.array(transmission_arrays))\n \n ims = []\n\n for i, panel in enumerate(panels):\n im = ax[i].imshow(np.ma.masked_where(transmission_arrays[i] == 0, transmission_arrays[i]), \n origin = \"lower\", aspect = \"equal\", \n vmin = vmin_panels, vmax = vmax_panels)\n \n ims.append(im)\n ax[i] = adjust_ticks(ax[i], xtick_fontsize = 14, ytick_fontsize = 14, \n xticklabels = xticklabels, yticklabels = yticklabels)\n \n if i > 0:\n ax[i].set_yticks([])\n \n ax[i].set_xlabel(xlabel, size = 16)\n ax[i].set_title(panel_labels[i], size = title_fontsize)\n \n if not spines:\n ax[i].spines[\"top\"].set_visible(False)\n ax[i].spines[\"right\"].set_visible(False)\n ax[i].spines[\"bottom\"].set_visible(False)\n ax[i].spines[\"left\"].set_visible(False)\n \n fig.subplots_adjust(right = 0.85)\n axes_cbar = fig.add_axes([0.9, 0.3, 0.02, 0.4])\n cbar = fig.colorbar(ims[n_panels - 1], cax = axes_cbar)\n \n cbar.set_label(legend_title, size = 18)\n \n return(fig, ax)\n\n\n\ndef plot_interactions_by_age(df_interact, groupvar, group_labels, \n xlabel = \"\", ylabel = \"\", legend_title = \"\", title = \"\", nbins = 40):\n \"\"\"\n \"\"\"\n \n # Aggregate by age group and ID\n df_agg = df_interact.groupby([groupvar, \"ID_1\"]).size().reset_index(name = \"counts\")\n \n # Define age groups, size of bins\n groups = np.unique(df_agg[groupvar])\n n_groups = len(groups)\n bins = np.arange(nbins)\n \n # Split by age group\n hists = [df_agg.loc[df_agg[groupvar] == g].counts for g in groups]\n \n # Define the colourmap\n colours = get_discrete_viridis_colours(n_groups)\n \n fig, ax = plt.subplots()\n #ax.grid(which = 'major', axis = 'y', alpha = 0.4, zorder = 0)\n ax.hist(hists, bins, stacked = True, label = group_labels, \n width = 0.8, color = colours, edgecolor = \"#0d1a26\", linewidth = 0.5, zorder = 3)\n \n ax.set_xlim([0, np.max(bins)])\n \n legend = ax.legend(loc = 'right', borderaxespad = 0, frameon = False, \n prop = {'size': 16}, fontsize = \"x-large\")\n \n legend.set_title(legend_title, prop = {'size':18})\n\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n\n ax.set_xlabel(xlabel, size = 16)\n ax.set_ylabel(\"Count\", size = 16)\n ax.set_title(title, size = 20)\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(16)\n \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(16)\n \n return(fig, ax)\n\n\ndef PlotHistIFRByAge(df, \n numerator_var, denominator_var,\n age_group_var = \"age_group\",\n NBINS = None,\n group_labels = None,\n xlabel = \"\",\n xticklabels = None,\n density = False,\n ):\n \"\"\"\n Plot IFR by age. 
\n \"\"\"\n \n a = 1.0\n bins = np.arange(0, NBINS + 1) - 0.1\n n_age = len(np.unique(df[age_group_var]))\n \n fig, ax = plt.subplots()\n \n height_n, bins_n = np.histogram(df[df[numerator_var] > 0][age_group_var], \n bins, density = False)\n height_d, bins_d = np.histogram(df[df[denominator_var] > 0][age_group_var], \n bins, density = False)\n \n heights = np.divide(height_n, height_d)\n \n ax.bar(range(n_age), heights, align = \"center\",\n alpha = a, color = \"#0072B2\", edgecolor = \"#0d1a26\", linewidth = 0.5,\n zorder = 3)\n \n for bi in range(n_age):\n ax.text(bi, heights[bi], str(np.round(heights[bi], 2)), \n ha = \"center\", va = \"bottom\", color = \"grey\")\n \n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n \n ax.set_xlim([-0.5, np.max(bins)+0.5])\n ax.set_ylim([0, np.max(heights)*1.1])\n \n if xticklabels is not None:\n ax.set_xticks(bins)\n ax.set_xticklabels(xticklabels, size = 12)\n \n ax.set_xlabel(xlabel, size = 18)\n ax.set_ylabel(\"Infection fatality ratio (IFR) by age\", size = 18)\n \n overall_ifr = height_n.sum()/height_d.sum()\n ax.text(0.05, 0.8, \"Overall IFR: {}\".format(np.round(overall_ifr, 4)), size = 18, \n ha = 'left', va = 'center', transform = ax.transAxes, color = \"black\")\n \n return(fig, ax)\n\n\ndef PlotHistByAge(df, \n groupvars, \n age_group_var = \"age_group\",\n NBINS = None,\n group_labels = None,\n xlabel = \"\",\n xticklabels = None,\n density = False,\n ylim = 0.5\n ):\n \"\"\"\n \n \"\"\"\n \n a = 1.0\n bins = np.arange(0, NBINS + 1) - 0.1\n \n # Define number of groups\n n_groups = len(groupvars)\n \n if group_labels is None:\n group_labels = groupvars\n \n fig, ax = plt.subplots(nrows = n_groups)\n \n for axi, var in enumerate(groupvars):\n height, bins, objs = ax[axi].hist(df[df[var] > 0][age_group_var], bins, width = 0.8, \n alpha = a, color = \"#0072B2\", edgecolor = \"#0d1a26\", linewidth = 0.5, \n zorder = 3, density = density)\n \n for bi in range(len(bins) - 1):\n ax[axi].text(bins[bi] + 0.425, height[bi], str(np.round(height[bi], 2)), \n ha = \"center\", va = \"bottom\", color = \"grey\", size = 12)\n \n ax[axi].set_xlim([0, np.max(bins)])\n ax[axi].spines[\"top\"].set_visible(False)\n ax[axi].spines[\"right\"].set_visible(False)\n ax[axi].set_ylim([0, ylim])\n ax[axi].text(0.02, 0.8, group_labels[axi], size = 18, \n ha = 'left', va = 'center', transform = ax[axi].transAxes, color = \"black\")\n \n if axi == (n_groups - 1):\n if xticklabels is not None:\n ax[axi].set_xticks(bins + 0.425)\n ax[axi].set_xticklabels(xticklabels, size = 12)\n else:\n ax[axi].set_xticks(bins + 0.425)\n ax[axi].set_xticklabels(bins, size = 12)\n else:\n ax[axi].set_xticks(bins + 0.425)\n ax[axi].set_xticks([])\n \n ax[n_groups-1].set_xlabel(xlabel, size = 18)\n \n plt.subplots_adjust(hspace = 0.5)\n \n return(fig, ax)\n\n\n\ndef plot_stacked_hist_by_group(df, \n groupvar, binvar,\n NBINS = None,\n groups = None,\n group_labels = None,\n xlabel = \"\",\n ylabel = \"\",\n title = \"\",\n legend_title = \"\", legend_loc = \"right\",\n xticklabels = None\n ):\n \"\"\"\n \"\"\"\n # Define groups and size of groups\n if not groups:\n groups = np.unique(df[groupvar])\n n_groups = len(groups)\n \n if group_labels is None:\n group_labels = groups\n \n # Split by group\n hists = [df.loc[df[groupvar] == state][binvar] for state in groups]\n \n # Define the colourmap\n colours = get_discrete_viridis_colours(n_groups)\n \n bins = np.arange(NBINS)\n \n fig, ax = plt.subplots()\n \n #ax.grid(which = 'major', axis = 'y', alpha = 0.4, 
zorder = 0)\n ax.hist(hists, bins, stacked = True, label = group_labels, width = 0.8, \n color = colours, edgecolor = \"#0d1a26\", linewidth = 0.5, \n zorder = 3)\n \n ax.set_xlim([0, np.max(bins)])\n \n legend = ax.legend(loc = legend_loc, borderaxespad = 0, \n frameon = False, prop = {'size': 16}, fontsize = \"x-large\")\n legend.set_title(legend_title, prop = {'size':18})\n\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n\n ax.set_xlabel(xlabel, size = 16)\n ax.set_ylabel(ylabel, size = 16)\n ax.set_title(title, size = 20)\n \n if xticklabels is not None:\n ax.set_xticklabels(xticklabels)\n else:\n ax.set_xticks(bins + 0.45)\n ax.set_xticklabels(bins)\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(16)\n \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(16)\n \n return(fig, ax)\n\n\ndef PlotStackedHistByGroupByPanel(df, \n groupvar, countvar, panelvar,\n NBINS = None,\n group_labels = None,\n panel_labels = None,\n xlabel = \"\",\n ylabel = \"\",\n title = \"\",\n legend_title = \"\", legend_loc = \"right\",\n xticklabels = None,\n ylims = None\n ):\n \"\"\"\n \n \"\"\"\n \n a = 1.0\n \n # Define panels and size of panels\n panels = np.unique(df[panelvar])\n n_panels = len(panels)\n \n if panel_labels is None:\n panel_labels = panels\n \n # Define groups and size of groups\n groups = np.unique(df[groupvar])\n n_groups = len(groups)\n \n if group_labels is None:\n group_labels = groups\n \n # Define the colourmap\n colours = get_discrete_viridis_colours(n_groups)\n \n bins = np.arange(NBINS)\n \n fig, ax = plt.subplots(ncols = n_panels)\n \n for i, panel in enumerate(panels):\n \n # Split by group\n hists = [df.loc[(df[groupvar] == state) & (df[panelvar] == panel)][countvar] for state in groups]\n \n #ax[i].grid(which = 'major', axis = 'y', alpha = 0.4, zorder = 0)\n \n ax[i].hist(hists, bins, stacked = True, label = group_labels, width = 0.8, \n alpha = a, color = colours, edgecolor = \"#0d1a26\", linewidth = 0.5, \n zorder = 3)\n \n ax[i].set_xlim([0, np.max(bins)])\n\n ax[i].spines[\"top\"].set_visible(False)\n ax[i].spines[\"right\"].set_visible(False)\n\n ax[i].set_xlabel(xlabel, size = 16)\n ax[i].set_title(panel_labels[i], size = 20)\n \n if i == 0:\n ax[i].set_ylabel(ylabel, size = 16)\n \n xticks = np.arange(0, NBINS + 5, 5)\n ax[i].set_xticks(xticks)\n ax[i].set_xticklabels(xticks)\n if ylims:\n ax[i].set_ylim(ylims)\n \n legend = ax[i].legend(loc = legend_loc, borderaxespad = 0, \n frameon = False, prop = {'size': 10}, fontsize = \"x-large\")\n legend.set_title(legend_title, prop = {'size':18})\n\n return(fig, ax)\n","repo_name":"BDI-pathogens/OpenABM-Covid19","sub_path":"examples/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":36784,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"94"} +{"seq_id":"22352016756","text":"'''@file listener.py\ncontains the listener code'''\n\nimport tensorflow as tf\nfrom .encoder import Encoder\nfrom tfModels.tensor2tensor import common_attention\nfrom tfModels.tensor2tensor import common_layers\nfrom tfModels.layers import residual, ff_hidden\n\nfrom nabu.neuralnetworks.components import layer\n\n\nclass SelfAttention(Encoder):\n '''a listener object\n transforms input features into a high level representation'''\n\n def encode(self, features, len_feas):\n '''\n Create the variables and do the forward computation\n\n Args:\n inputs: [batch_size x time x ...] 
tensor\n input_seq_length: [batch_size] vector\n is_train: whether or not the network is in training mode\n\n Returns:\n - [bath_size x time x ...] tensor\n - [batch_size] tensor\n '''\n #add input noise\n num_blocks = self.args.model.encoder.num_blocks\n num_cell_units = self.args.model.encoder.num_cell_units\n num_heads = self.args.model.encoder.num_heads\n dropout = self.args.model.encoder.dropout\n attention_dropout_rate = self.args.model.encoder.attention_dropout_rate if self.is_train else 0.0\n residual_dropout_rate = self.args.model.encoder.residual_dropout_rate if self.is_train else 0.0\n\n # \"relu\": tf.nn.relu,\n # \"sigmoid\": tf.sigmoid,\n # \"tanh\": tf.tanh,\n # \"swish\": lambda x: x * tf.sigmoid(x),\n # \"glu\": lambda x, y: x * tf.sigmoid(y)}\n _ff_activation = tf.nn.relu\n\n encoder_output = features\n\n # Mask\n encoder_padding = tf.equal(tf.reduce_sum(encoder_output, -1), 0)\n encoder_attention_bias = common_attention.attention_bias_ignore_padding(encoder_padding)\n\n # Add positional signal\n encoder_output = common_attention.add_timing_signal_1d(encoder_output)\n # Dropout\n if dropout > 0 and self.is_train:\n encoder_output = tf.nn.dropout(encoder_output, keep_prob=1.0-dropout)\n\n # Blocks\n with tf.variable_scope(\"block_0\"):\n encoder_output = common_attention.multihead_attention(\n query_antecedent=encoder_output,\n memory_antecedent=None,\n bias=encoder_attention_bias,\n total_key_depth=num_cell_units,\n total_value_depth=num_cell_units,\n output_depth=num_cell_units,\n num_heads=num_heads,\n dropout_rate=attention_dropout_rate,\n name='encoder_self_attention',\n summaries=True)\n encoder_output = ff_hidden(\n inputs=encoder_output,\n hidden_size=4 * num_cell_units,\n output_size=num_cell_units,\n activation=_ff_activation)\n for i in range(1, num_blocks, 1):\n with tf.variable_scope(\"block_{}\".format(i)):\n # Multihead Attention\n encoder_output = residual(\n encoder_output,\n common_attention.multihead_attention(\n query_antecedent=encoder_output,\n memory_antecedent=None,\n bias=encoder_attention_bias,\n total_key_depth=num_cell_units,\n total_value_depth=num_cell_units,\n output_depth=num_cell_units,\n num_heads=num_heads,\n dropout_rate=attention_dropout_rate,\n name='encoder_self_attention',\n summaries=True),\n dropout_rate=residual_dropout_rate,\n index_layer=i*2-1)\n\n # Feed Forward\n encoder_output = residual(\n encoder_output,\n ff_hidden(\n inputs=encoder_output,\n hidden_size=4 * num_cell_units,\n output_size=num_cell_units,\n activation=_ff_activation),\n dropout_rate=residual_dropout_rate,\n index_layer=i*2)\n # Mask padding part to zeros.\n encoder_output *= tf.expand_dims(1.0 - tf.to_float(encoder_padding), axis=-1)\n\n return encoder_output, len_feas\n","repo_name":"eastonYi/eastonCode","sub_path":"tfSeq2SeqModels/encoders/self_attention.py","file_name":"self_attention.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"94"} +{"seq_id":"42721632876","text":" # -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 17 00:03:18 2022\n\n@author: pokey\n\nHere I will try to wirte the functions for the third time revised initial guess function where the user puts in the guess for the R_ion and the vlue of JN for the algorithm to find the corresponding\ninitial guess set.\n\n\n\n\"\"\"\nimport numpy as np\nfrom scipy.signal import argrelextrema\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import 
Rectangle\nfrom matplotlib.text import Text\nfrom matplotlib.image import AxesImage\nimport numpy as np\nfrom numpy.random import rand\nfrom waiting import wait\nfrom scipy.optimize import fsolve\n\n\n#%%\n\n# def find_p(df):\n# wzlist = df[['frequency','impedance']].to_numpy()\n# zrlist = np.real(df['impedance'].values)\n# zilist = np.imag(df['impedance'].values)\n# R_n0 = zrlist[-1] #the last element of zrlist, corresponds to the end of the curve. Might need to revise for imcomplete circle\n# nhlist = np.array(argrelextrema(zilist, np.less))\n# whlist = np.real(wzlist[nhlist][0][: , 0])\n# w0 = min(whlist)\n# C_a = 1/(R_n0 * w0)\n# #print('R is ----', R_n0)\n# print( zrlist[-1] )\n# return C_a \n\nVT = 0.026 # = kT/q\nC_a = 2e-7\nC_b = 2e-7\nR_i = 5e8\nVb = .2\nC_g = 2.8e-8 \nJ_s = 7.1e-11\nnA = 1.93\nq = 1.6e-19\nT = 300\nkb = 1.38e-23\n\nclass init_guess_class:\n\n def __init__(self):\n self.C_A = None\n self.C_B = None\n self.R_ion = None\n self.C_g = None\n self.J_s = None\n self.nA = None\n \n def update_all(self, init):\n self.C_A = init[0]\n self.C_B = init[1]\n self.R_ion = init[2]\n self.C_g = init[3]\n self.J_s = init[4]\n self.nA = init[5]\n def update_R_ion(self, R_ion):\n self.R_ion = R_ion\n def values(self):\n return [self.C_A, \n self.C_B ,\n self.R_ion ,\n self.C_g ,\n self.J_s ,\n self.nA ]\n def update_param(self,param,value):\n setattr(self,param,value)\n # if param == 'C_A':\n # self.C_A = value\n # if param == 'C_B':\n # self.C_B = value\n # if param == 'C_g':\n # self.C_g = value\n # if param == 'R_ion':\n # self.R_ion = value\n # if param == 'J_s':\n # self.J_s = value\n # if param == 'nA':\n # self.nA = value\n \nclass fix_params(): #class that store the parameters to fix in the fit\n def __init__(self):\n self.C_A = False\n self.C_B = False\n self.R_ion = False\n self.C_g = False\n self.J_s = False\n self.nA = False\n def update_param(self,param,value):\n setattr(self,param,value)\n def get(self,param):\n return getattr(self, param)\n def fix_index(self): #find the index of variables to fix\n attr_list = [self.C_A, \n self.C_B ,\n self.R_ion ,\n self.C_g ,\n self.J_s ,\n self.nA ]\n index = []\n for i in range(0,6):\n if attr_list[i] == True:\n index.append(i)\n return index\n\n\ndef closest_node(node, nodes): #the function for finding the closest point on the line to the user selected point\n #nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum('ij,ij->i', deltas, deltas)\n return np.argmin(dist_2)\n\ndef find_extremum(df): #algorithm to find the extremums of the Nyquist plot\n # wzlist = df[['frequency','impedance']].to_numpy()\n zrlist = np.real(df['impedance'].values)\n zilist = np.imag(df['impedance'].values)\n zrilist = np.stack((zrlist , -zilist), axis = 1)\n #now letting thr user to determine the rough location of the peak and local minimum\n plt.plot(zrlist,-zilist,'.')\n plt.title('please select the maxima')\n vertices = plt.ginput(2)\n plt.close()\n plt.plot(zrlist,-zilist,'.')\n plt.title('please select the minimum')\n mini = plt.ginput(1)\n plt.close()\n # use the approximate position the user selected to find the corresponding index in the dataframe\n nhlist = []\n nllist = []\n for vert in vertices:\n num = closest_node(vert , zrilist)\n nhlist.append(num)\n minnum = closest_node(mini , zrilist)\n nllist.append(minnum)\n return nhlist,nllist\n\n\ndef find_point(df):\n zlist = df['impedance'].to_numpy()\n wzlist = df[['frequency','impedance']].to_numpy()\n nhlist, nllist= find_extremum(df)\n R_n8 = wzlist[nllist[0]][1].real 
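# the critical points read off the Nyquist plot here feed init_guess below: k = R_n0/R_n8, C_ion = 1/(w_n*k*R_ion), C_A = C_ion/(1 - 1/k), 1/C_B = 1/C_ion - 1/C_A\n 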
#obtained the R_n8 parameter\n R_n0 = wzlist[-1][1].real #obtained the R_n0 parameter\n whlist = np.real(wzlist[nhlist][:,0])\n w_n = min(whlist)\n w_t = wzlist[[nllist][0]][0][0].real #obtained the w_n parameter\n w_r = max(whlist)\n C_eff = np.real(np.imag(1/df['impedance'].values)/df['frequency'])\n # print(C_eff)\n #C_n = C_eff[-1]\n C_G = C_eff[0]\n return R_n8 , R_n0 , w_n , w_t , C_G ,w_r\n\n\n\n\ndef init_guess(df ,crit_points):\n R_n8 , R_n0 , w_n , w_t , C_G ,w_r = crit_points\n R_ion = float(input('please input your guess of R_ion: '))\n J_n = np.real(df['recomb current'][0]) \n # print('J_n is ', J_n)\n # J_n_t = R_n8 * q / (1.93 *kb *T)\n # print('theoretical J_n is:',1/J_n_t)\n k = R_n0 / R_n8\n n = J_n *R_n8 *q / ( kb *T)\n # print(n,J_n ,R_n8 ,q , kb ,T)\n C_ion = 1 / (w_n * k * R_ion)\n C_g = C_G\n C_A = C_ion / (1 - 1/k)\n C_B = 1 / (1/C_ion - 1/C_A)\n V = np.real(df['bias voltage'][0])\n # print(J_n , V, C_ion,C_A,q,n ,kb ,T)\n J_s = J_n / np.e**((V*(1 - C_ion/C_A)*q) / (n * kb * T)) #!!!!!!!!!!!!!!!!!!!!! HERE THE VALULE OF N IS OFF\n return C_A,C_B, R_ion, C_g, J_s,n\n\n\ndef init_guess_slider(df, points, R_ion): #the function for the slider of R_ion (known points and R_ion)\n R_n8 , R_n0 , w_n , w_t , C_G ,w_r = points\n J_n = np.real(df['recomb current'][0]) \n # print('J_n is ', J_n)\n # J_n_t = R_n8 * q / (1.93 *kb *T)\n # print('theoretical J_n is:',1/J_n_t)\n k = R_n0 / R_n8\n n = J_n *R_n8 *q / ( kb *T)\n # print(n,J_n ,R_n8 ,q , kb ,T)\n C_ion = 1 / (w_n * k * R_ion)\n C_g = C_G\n C_A = C_ion / (1 - 1/k)\n C_B = 1 / (1/C_ion - 1/C_A)\n V = np.real(df['bias voltage'][0])\n # print(J_n , V, C_ion,C_A,q,n ,kb ,T)\n J_s = J_n / np.e**((V*(1 - C_ion/C_A)*q) / (n * kb * T)) #!!!!!!!!!!!!!!!!!!!!! HERE THE VALULE OF N IS OFF\n return C_A,C_B, R_ion, C_g, J_s,n\n\n\n\n\n\n#%% FOR TESTING\n\n#first generating the dataframe for testing\n\nVT = 0.026 # = kT/q\nC_a = 2e-7\nC_b = 2e-7\nR_i = 5e8\nVb = .2\nC_g = 2.8e-8 \nJ_s = 7.1e-11\nnA = 1.93\nq = 1.6e-19\nT = 300\nkb = 1.38e-23\n\nimport pero_ig_fit_func as pif \n\n\nV = 0.8\nw = np.logspace(-6 , 6 , 1000) \n\nzlist, J1 = pif.pero_model(w, C_a, C_b, R_i, C_g, J_s, nA, V)\nwzlist = np.stack((w , zlist) , axis = -1)\nwzlist = wzlist[wzlist[:, 1].real.argsort()] # sorting the wzlist by the real part of the impedance(preparing for the find extrema function)\nvlist = np.ones(len(wzlist)) * V\njlist = np.ones(len(wzlist)) * J1\nwz_v_jlist = np.column_stack((wzlist , vlist,jlist))\n#v_wz_jlist = np.column_stack((v_))\ndf = pd.DataFrame(wz_v_jlist , columns=['frequency' , 'impedance' , 'bias voltage','recomb current'])\n\n\n#%% TESTING THE FIND POINT FUNCTION\n# R_n8 , R_n0 , w_n , w_t , C_G,w_r = find_point(df)\n\n\n#%% TESTING THE INIT GUESS FUNCTION\n# C_A,C_B, C_g, R_ion,n,J_s = init_guess(df)\n# print(C_A,C_B, C_g, R_ion,n,J_s)\n\n\n\n\n\n\n\"\"\"\nthe parts below this was to investigate the without-R_ion-guess method of initial guess algorithm.\noutdated for now.\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n# #%%my own formula for w_t\n# C_ion = 1 / (1/C_a + 1/C_b)\n# c_ion = C_ion\n# R_ion = R_i\n# c_g = C_g\n# w_t_t = np.sqrt(C_g*(C_g + C_ion))/(R_i * C_g**2 + R_i*C_ion*C_g)\n# print(w_t_t)\n# w = np.logspace(-8,-2,10000)\n\n\n# def funcb(w): # the function of theta vs. 
freqeuncy derived in matlab\n# eq = -(1/(c_ion*w*(R_ion**2*c_g**2*w**2 + 1)) + (R_ion*c_g*(R_ion*c_g*w + R_ion*c_ion*w))/(c_ion*(R_ion**2*c_g**2*w**2 + 1)))/((R_ion*c_g)/(c_ion*(R_ion**2*c_g**2*w**2 + 1)) - (R_ion*c_g*w + R_ion*c_ion*w)/(c_ion*w*(R_ion**2*c_g**2*w**2 + 1)))\n# return eq\n\n\n# plt.plot(w,funcb(w))\n# plt.xscale('log')\n# plt.yscale('log')\n\n# #%%\n\n# # plt.plot(np.real(df['frequency'].values), np.angle(np.conjugate(df['impedance']),deg = False),'.')\n# # plt.plot(np.real(df['frequency'].values), (np.arctan(-np.imag(df['impedance'])/ np.real(df['impedance']))),)\n# # plt.xscale('log')\n# # plt.yscale('log')\n\n# plt.plot(np.real(df['frequency'].values), np.angle(-(df['impedance']),deg = False),'.')\n# # plt.plot(np.real(df['frequency'].values), np.angle(np.conjugate(df['impedance']),deg = False),'.')\n# #plt.plot(np.real(df['frequency'].values), -(np.arctan(np.imag(df['impedance'])/ np.real(df['impedance']))),)\n# plt.xscale('log')\n# plt.yscale('log')\n# #%%\n# c = 1 + 2j\n# a5 = np.arctan(np.imag(c)/ np.real(c))\n# a6 = np.angle(c,deg = True)\n# print(a5, a6)\n# #%%\n# C_ion_t = 1/(1/C_a +1/C_b)\n# w_t_t = 1/(R_i *np.sqrt(C_g*(C_g +C_ion_t)))\n# #%%#%% writing initial guess function, use w_n find c_g\n# n_J_n = R_n8 *q /(kb*T)\n# k = R_n0 / R_n8\n# def func(C_g):\n# #def func(C_ion):\n# # R_ion = 1 / (w_n * k * C_ion)\n# # C_g = 1/ (1 / C_G - 1/C_ion)\n# # eq = (R_ion * np.sqrt(C_g * (C_g + C_ion))) - 1 / w_t\n# C_ion = (1/(R_n8*w_r - 1/C_g))\n# #eq=(1/k/(w_n/(R_n8*w_r - 1/C_g))*np.sqrt(np.abs(C_g*(C_g + 1/(R_n8*w_r - 1/C_g)) ))- 1/w_t)\n# eq=(1/k/(w_n*C_ion))*np.sqrt((C_g*(C_g + C_ion) ))- 1/w_t \n# #print(np.sqrt(C_g * (C_g + C_ion)))\n# # C_ion = (1/(1/C_G -1/C_g))\n# # eq = 1/(k*w_n * C_ion) * ((C_g * (C_g + C_ion)))**0.5 - 1 / w_t\n# #eq = k/(w_n/(R_n8*w_r - 1/C_g))*np.sqrt((C_g*(C_g + 1/(R_n8*w_r - 1/C_g)))) - 1/w_t\n# #eq = np.abs(k/(w_n/(R_n8*w_r - 1/C_g))*np.sqrt(C_g*(C_g + 1/(R_n8*w_r - 1/C_g)) - 1/w_t))\n# return eq\n# # return R_ion * np.sqrt(C_g * (C_g + C_ion)) \n \n# #%% writing initial guess function, use C_g find c_g\n# def func(C_g):\n# C_ion = 1 / (1/C_G - 1/C_g)\n# eq=(1/k/(w_n*C_ion))*np.sqrt((C_g*(C_g + C_ion) ))- 1/w_t \n# return eq\n \n# #%%\n# # def func(C_g):\n# # C_ion = 1/(R_n8*w_r - 1/C_g)\n# # eq=(C_g*(C_g + C_ion))\n# # return C_ion\n\n \n# #%% writing initial guess function, use C_g find c_ion\n# def func(C_ion):\n# R_ion = 1 / (w_n * k * C_ion)\n# C_g = 1/ (1 / C_G - 1/C_ion)\n# #eq = (R_ion * np.sqrt((C_g * (C_g + C_ion)))) - 1 / w_t\n# eq = (R_ion * np.sqrt((C_g * (C_g + C_ion)))) - 1 / w_t\n# return eq \n \n# #%%writing function testing is C_g goes negaitve\n# # def func(C_ion):\n# # R_ion = 1 / (w_n * k * C_ion)\n# # C_g = 1/ (1 / C_G - 1/C_ion)\n# # eq = (R_ion * np.sqrt((C_g * (C_g + C_ion)))) - 1 / w_t\n# # return C_g \n\n \n# #%%\n# root = fsolve(func, 1e-7)\n# print(root)\n# #%%\n# # C_g = np.logspace(-10,10,10000)\n# C_ion = np.logspace(-8,-2,10000)\n# list9 = np.ones(10000)*10\n# # plt.plot(C_ion , func(C_ion)-0.9\n# plt.plot(C_ion , func(C_ion)+10)\n# plt.plot(C_ion , list9) \n# plt.xscale('log')\n# plt.yscale('log')\n\n\n# #%% theoretical values\n# C_i = 1/(1/C_a +1/C_b)\n# C_G_t = 1/(1/C_i +1/C_g)\n\n\n# plt.plot(df['frequency'], np.real(df['impedance']))\n\n# plt.xscale('log')\n# plt.yscale('log')\n\n\n\n\n\n\n# #%%\n# C_im =1/ (R_n8 *w_r - 1/4.7973e-05)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# #%% PROCEDURE of finding the initial guess\n# #这里是我在尝试用它\n# from scipy.optimize import fsolve\n\n\n\n# # def 
picking():\n# # pick_extremum(zrlist,zilist, 'max')\n# # pick_extremum(zrlist,zilist, 'min')\n\n\n# #preparing for finding the critical points on the plot \n# wzlist = df[['frequency','impedance']].to_numpy()\n# zrlist = np.real(df['impedance'].values)\n# zilist = np.imag(df['impedance'].values)\n# zrilist = np.stack((zrlist , -zilist), axis = 1)\n# #now letting thr user to determine the peak and local minimum\n# plt.plot(zrlist,-zilist,'.')\n# plt.title('please select the vertices')\n# vertices = plt.ginput(2)\n# plt.close()\n\n\n# plt.plot(zrlist,-zilist,'.')\n# plt.title('please select the minimum')\n# mini = plt.ginput(1)\n# plt.close()\n# # use the approximate position the user selected to find the corresponding index in the dataframe\n# def closest_node(node, nodes):\n# #nodes = np.asarray(nodes)\n# deltas = nodes - node\n# dist_2 = np.einsum('ij,ij->i', deltas, deltas)\n# return np.argmin(dist_2)\n\n# nhlist = []\n# nllist = []\n\n# for vert in vertices:\n# num = closest_node(vert , zrilist)\n# nhlist.append(num)\n\n# minnum = closest_node(mini , zrilist)\n# nllist.append(minnum)\n# nllist = np.asarray(nllist)\n# nhlist = np.asarray(nhlist)\n\n\n# # pick_extremum(zrlist,zilist, 'max')\n# # pick_extremum(zrlist,zilist, 'min')\n# # picking()\n# # def is_continue(cont):\n# # if cont == 'y':\n# # return True \n# # return False\n# # cont = input('are you ready to continue? y/n \\n')\n# # wait(lambda: is_continue(cont), timeout_seconds=120, waiting_for=\"something to be ready\")\n# # nhlist = np.loadtxt('vertices.txt',delimiter = ',')\n# # nllist = np.loadtxt('mins.txt',delimiter = ',')\n\n\n\n\n# # nhlist2 = np.array(argrelextrema(zilist, np.less))[0]\n# # nllist2 = np.array(argrelextrema(wzlist[:,1].imag, np.greater))[0]\n# whlist = np.real(wzlist[nhlist][: , 0])\n\n# # First extract all the needed information from the plot\n# R_n0 = zrlist[-1] #the last element of zrlist, corresponds to the end of the curve. 
Might need to revise for imcomplete circle\n# R_n8 = wzlist[nllist[0]][1].real\n# wn = min(whlist)\n# wr = max(whlist)\n# wt = wzlist[[nllist][0]][0][0].real #w_theta\n\n# #Now start to implement the finding algo\n# k = R_n8 / R_n0\n# #solving the three variable non linear equation.\n# def equations(p, R_n0 , R_n8 , wn , wr , wt , k ):\n# C_ion , C_g = p\n# eq1 = 1 / (R_n8 *wr - 1 / C_g) - C_ion\n# eq2 = k / (wn * C_ion) * np.sqrt(C_g * (C_g + C_ion)) - 1/wt\n# return (eq1 , eq2 )\n# #%%\n# C_ion , C_g = fsolve(lambda p: equations(p, R_n0 , R_n8 , wn , wr , wt , k ), (1 , 1) , maxfev= 1000000 )\n \n\n\n\n# print(C_ion , C_g)\n\n\n# #%% then trying out the functions\n# C_g = 2.8e-8 \n# C_ion = 1/ (1/C_a +1/C_b)\n\n# print(1 / (R_n8 *wr - 1 / C_g) - C_ion)\n\n# #%%trying the interactive point picking function\n\n# def pick_vertex(zrlist,zilist):\n# myfile = open('vertices.txt' , 'w')\n# myfile.close()\n# # simple picking, lines, rectangles and text\n# fig, ax1 = plt.subplots(1, 1)\n# ax1.set_title('Please click on the vertices of the Nyquist plot', picker=True)\n# ax1.set_ylabel('ylabel', picker=True, bbox=dict(facecolor='red'))\n# line, = ax1.plot(zrlist,-zilist, 'o', picker=True, pickradius=5)\n\n# # pick the rectangle\n# # ax2.bar(range(10), rand(10), picker=True)\n# # for label in ax2.get_xticklabels(): # make the xtick labels pickable\n# # label.set_picker(True)\n# def onpick1(event):\n# if isinstance(event.artist, Line2D):\n# thisline = event.artist\n# xdata = thisline.get_xdata()\n# ydata = thisline.get_ydata()\n# ind = event.ind\n# top = (np.column_stack([xdata[ind], -ydata[ind]])[0])\n# print('onpick1 line:', top)\n# myfile = open('vertices.txt' , 'a')\n# myfile.write(str(top)+'\\n')\n# myfile.close()\n# #return np.column_stack([xdata[ind], ydata[ind]])\n# # elif isinstance(event.artist, Rectangle):\n# # patch = event.artist\n# # print('onpick1 patch:', patch.get_path())\n# # elif isinstance(event.artist, Text):\n# # text = event.artist\n# # print('onpick1 text:', text.get_text())\n\n# fig.canvas.mpl_connect('pick_event', onpick1)\n\n \n# #pick_simple(zrlist,zilist)\n\n\n\n# #%% reading the string in file to be np array\n# #y = np.loadtxt('vertices.txt',delimiter = ',')\n \n# #nhlist = get_points('vertices.txt')\n\n\n\n","repo_name":"Nugkta/ISfit","sub_path":"older/old2/init_guess3.py","file_name":"init_guess3.py","file_ext":"py","file_size_in_byte":15859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"3832087677","text":"#!/usr/bin/env python3\n\"\"\"A simple gui interface\"\"\"\nimport sys\nimport traceback\nimport webbrowser\n\nimport config\nfrom pmca.commands.usb import *\nfrom pmca.ui import *\nfrom pmca.usb.usbshell import *\n\nif getattr(sys, 'frozen', False):\n from frozenversion import version\nelse:\n version = None\n\nclass PrintRedirector(object):\n \"\"\"Redirect writes to a function\"\"\"\n def __init__(self, func, parent=None):\n self.func = func\n self.parent = parent\n def write(self, str):\n if self.parent:\n self.parent.write(str)\n self.func(str)\n def flush(self):\n self.parent.flush()\n\n\nclass AppLoadTask(BackgroundTask):\n def doBefore(self):\n self.ui.setAppList([])\n self.ui.appLoadButton.config(state=DISABLED)\n\n def do(self, arg):\n try:\n print('')\n return list(listApps().values())\n except Exception:\n traceback.print_exc()\n\n def doAfter(self, result):\n if result:\n self.ui.setAppList(result)\n self.ui.appLoadButton.config(state=NORMAL)\n\n\nclass InfoTask(BackgroundTask):\n 
\"\"\"Task to run infoCommand()\"\"\"\n def doBefore(self):\n self.ui.infoButton.config(state=DISABLED)\n\n def do(self, arg):\n try:\n print('')\n infoCommand()\n except Exception:\n traceback.print_exc()\n\n def doAfter(self, result):\n self.ui.infoButton.config(state=NORMAL)\n\n\nclass InstallTask(BackgroundTask):\n \"\"\"Task to run installCommand()\"\"\"\n def doBefore(self):\n self.ui.installButton.config(state=DISABLED)\n return self.ui.getMode(), self.ui.getSelectedApk(), self.ui.getSelectedApp()\n\n def do(self, args):\n (mode, apkFilename, app) = args\n try:\n print('')\n if mode == self.ui.MODE_APP and app:\n installCommand(appPackage=app.package)\n elif mode == self.ui.MODE_APK and apkFilename:\n with open(apkFilename, 'rb') as f:\n installCommand(apkFile=f)\n else:\n installCommand()\n except Exception:\n traceback.print_exc()\n\n def doAfter(self, result):\n self.ui.installButton.config(state=NORMAL)\n\n\nclass FirmwareUpdateTask(BackgroundTask):\n \"\"\"Task to run firmwareUpdateCommand()\"\"\"\n def doBefore(self):\n self.ui.fwUpdateButton.config(state=DISABLED)\n return self.ui.getSelectedDat()\n\n def do(self, datFile):\n try:\n if datFile:\n print('')\n with open(datFile, 'rb') as f:\n firmwareUpdateCommand(f)\n except Exception:\n traceback.print_exc()\n\n def doAfter(self, result):\n self.ui.fwUpdateButton.config(state=NORMAL)\n\n\nclass StartUpdaterShellTask(BackgroundTask):\n \"\"\"Task to run updaterShellCommand() and open UpdaterShellDialog\"\"\"\n def doBefore(self):\n self.ui.startButton.config(state=DISABLED)\n\n def do(self, arg):\n try:\n print('')\n updaterShellCommand(complete=self.launchShell)\n except Exception:\n traceback.print_exc()\n\n def launchShell(self, dev):\n shell = UpdaterShellBackend(dev)\n shell.waitReady()\n\n endFlag = threading.Event()\n root = self.ui.master\n root.run(lambda: root.after(0, lambda: UpdaterShellDialog(root, shell, endFlag)))\n endFlag.wait()\n\n try:\n shell.syncBackup()\n except:\n print('Cannot sync backup')\n shell.exit()\n\n def doAfter(self, result):\n self.ui.startButton.config(state=NORMAL)\n\n\nclass TweakStatusTask(BackgroundTask):\n \"\"\"Task to run UpdaterShellBackend.getTweakStatus()\"\"\"\n def doBefore(self):\n self.ui.setState(DISABLED)\n\n def do(self, arg):\n try:\n return list(self.ui.shell.getTweakStatus())\n except Exception:\n traceback.print_exc()\n\n def doAfter(self, result):\n self.ui.setState(NORMAL)\n self.ui.setTweakStatus(result)\n\n\nclass TweakSetTask(BackgroundTask):\n \"\"\"Task to run UpdaterShellBackend.setTweakEnabled()\"\"\"\n def __init__(self, ui, id, var):\n BackgroundTask.__init__(self, ui)\n self.id = id\n self.var = var\n\n def doBefore(self):\n self.ui.setState(DISABLED)\n return self.var.get()\n\n def do(self, arg):\n try:\n self.ui.shell.setTweakEnabled(self.id, arg)\n except Exception:\n traceback.print_exc()\n\n def doAfter(self, result):\n self.ui.setState(NORMAL)\n self.ui.updateStatus()\n\n\nclass InstallerUi(UiRoot):\n \"\"\"Main window\"\"\"\n def __init__(self, title):\n UiRoot.__init__(self)\n\n self.title(title)\n self.geometry('450x500')\n self['menu'] = Menu(self)\n\n tabs = Notebook(self, padding=5)\n tabs.pack(fill=X)\n\n tabs.add(InfoFrame(self, padding=10), text='Camera info')\n tabs.add(InstallerFrame(self, padding=10), text='Install app')\n tabs.add(UpdaterShellFrame(self, padding=10), text='Tweaks')\n tabs.add(FirmwareFrame(self, padding=10), text='Update firmware')\n\n self.logText = ScrollingText(self)\n self.logText.text.configure(state=DISABLED)\n 
self.logText.pack(fill=BOTH, expand=True)\n\n self.redirectStreams()\n\n def log(self, msg):\n self.logText.text.configure(state=NORMAL)\n self.logText.text.insert(END, msg)\n self.logText.text.configure(state=DISABLED)\n self.logText.text.see(END)\n\n def redirectStreams(self):\n for stream in ['stdout', 'stderr']:\n setattr(sys, stream, PrintRedirector(lambda str: self.run(lambda: self.log(str)), getattr(sys, stream)))\n\n\nclass InfoFrame(UiFrame):\n def __init__(self, parent, **kwargs):\n UiFrame.__init__(self, parent, **kwargs)\n\n self.infoButton = Button(self, text='Get camera info', command=InfoTask(self).run, padding=5)\n self.infoButton.pack(fill=X)\n\n\nclass InstallerFrame(UiFrame):\n MODE_APP = 0\n MODE_APK = 1\n\n def __init__(self, parent, **kwargs):\n UiFrame.__init__(self, parent, **kwargs)\n\n self.modeVar = IntVar(value=self.MODE_APP)\n\n appFrame = Labelframe(self, padding=5)\n appFrame['labelwidget'] = Radiobutton(appFrame, text='Select an app from the app list', variable=self.modeVar, value=self.MODE_APP)\n appFrame.columnconfigure(0, weight=1)\n appFrame.pack(fill=X)\n\n self.appCombo = Combobox(appFrame, state='readonly')\n self.appCombo.bind('<>', lambda e: self.modeVar.set(self.MODE_APP))\n self.appCombo.grid(row=0, column=0, sticky=W+E)\n self.setAppList([])\n\n self.appLoadButton = Button(appFrame, text='Refresh', command=AppLoadTask(self).run)\n self.appLoadButton.grid(row=0, column=1)\n\n appListLink = Label(appFrame, text='Source', foreground='blue', cursor='hand2')\n appListLink.bind('', lambda e: webbrowser.open_new('https://' + config.appengineServer + '/apps'))\n appListLink.grid(columnspan=2, sticky=W)\n\n apkFrame = Labelframe(self, padding=5)\n apkFrame['labelwidget'] = Radiobutton(apkFrame, text='Select an apk', variable=self.modeVar, value=self.MODE_APK)\n apkFrame.columnconfigure(0, weight=1)\n apkFrame.pack(fill=X)\n\n self.apkFile = Entry(apkFrame)\n self.apkFile.grid(row=0, column=0, sticky=W+E)\n\n self.apkSelectButton = Button(apkFrame, text='Open apk...', command=self.openApk)\n self.apkSelectButton.grid(row=0, column=1)\n\n self.installButton = Button(self, text='Install selected app', command=InstallTask(self).run, padding=5)\n self.installButton.pack(fill=X, pady=(5, 0))\n\n self.run(AppLoadTask(self).run)\n\n def getMode(self):\n return self.modeVar.get()\n\n def openApk(self):\n fn = askopenfilename(filetypes=[('Apk files', '.apk'), ('All files', '.*')])\n if fn:\n self.apkFile.delete(0, END)\n self.apkFile.insert(0, fn)\n self.modeVar.set(self.MODE_APK)\n\n def getSelectedApk(self):\n return self.apkFile.get()\n\n def setAppList(self, apps):\n self.appList = apps\n self.appCombo['values'] = [''] + [app.name for app in apps]\n self.appCombo.current(0)\n\n def getSelectedApp(self):\n if self.appCombo.current() > 0:\n return self.appList[self.appCombo.current() - 1]\n\n\nclass FirmwareFrame(UiFrame):\n def __init__(self, parent, **kwargs):\n UiFrame.__init__(self, parent, **kwargs)\n\n datFrame = Labelframe(self, padding=5)\n datFrame['labelwidget'] = Label(datFrame, text='Firmware file')\n datFrame.pack(fill=X)\n\n self.datFile = Entry(datFrame)\n self.datFile.pack(side=LEFT, fill=X, expand=True)\n\n self.datSelectButton = Button(datFrame, text='Open...', command=self.openDat)\n self.datSelectButton.pack()\n\n self.fwUpdateButton = Button(self, text='Update firmware', command=FirmwareUpdateTask(self).run, padding=5)\n self.fwUpdateButton.pack(fill=X, pady=(5, 0))\n\n def openDat(self):\n fn = askopenfilename(filetypes=[('Firmware files', 
'.dat'), ('All files', '.*')])\n if fn:\n self.datFile.delete(0, END)\n self.datFile.insert(0, fn)\n\n def getSelectedDat(self):\n return self.datFile.get()\n\n\nclass UpdaterShellFrame(UiFrame):\n def __init__(self, parent, **kwargs):\n UiFrame.__init__(self, parent, **kwargs)\n\n self.startButton = Button(self, text='Start tweaking (updater mode)', command=StartUpdaterShellTask(self).run, padding=5)\n self.startButton.pack(fill=X)\n\n\nclass UpdaterShellDialog(UiDialog):\n def __init__(self, parent, shell, endFlag=None):\n self.shell = shell\n self.endFlag = endFlag\n UiDialog.__init__(self, parent, \"Updater mode tweaks\")\n\n def body(self, top):\n tweakFrame = Labelframe(top, padding=5)\n tweakFrame['labelwidget'] = Label(tweakFrame, text='Tweaks')\n tweakFrame.pack(fill=X)\n\n self.boxFrame = Frame(tweakFrame)\n self.boxFrame.pack(fill=BOTH, expand=True)\n\n self.doneButton = Button(top, text='Done', command=self.cancel, padding=5)\n self.doneButton.pack(fill=X)\n\n self.updateStatus()\n\n def updateStatus(self):\n TweakStatusTask(self).run()\n\n def setTweakStatus(self, tweaks):\n for child in self.boxFrame.winfo_children():\n child.destroy()\n if tweaks:\n for id, desc, status, value in tweaks:\n var = IntVar(value=status)\n c = Checkbutton(self.boxFrame, text=desc + '\\n' + value, variable=var, command=TweakSetTask(self, id, var).run)\n c.pack(fill=X)\n else:\n Label(self.boxFrame, text='No tweaks available').pack(fill=X)\n\n def setState(self, state):\n for widget in self.boxFrame.winfo_children() + [self.doneButton]:\n widget.config(state=state)\n\n def cancel(self, event=None):\n UiDialog.cancel(self, event)\n if self.endFlag:\n self.endFlag.set()\n\n\ndef main():\n \"\"\"Gui main\"\"\"\n ui = InstallerUi('pmca-gui' + (' ' + version if version else ''))\n ui.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"joemaysong/test","sub_path":"pmca-gui.py","file_name":"pmca-gui.py","file_ext":"py","file_size_in_byte":9808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71674332788","text":"# Both of these are strings\nstring_double_quote = \"Hello world\"\nstring_single_quote = 'Hello world'\n\n\n# You can make an empty string like this\nempty_string = \"\"\nempty_string = str()\n\n\n# Python strings are immutable\na = \"some random text\"\nb = a\nid(a), id(b)\na == b # True\na is b # True\n\nb += \", and concatenate more strings\"\n\na # \"some random text\"\nb # \"some random text, and concatenate more strings\"\nid(a), id(b)\n\n\n# Python string interpolation\nname = \"Carmen\"\nage = 16\n\n\"I have a dog and her name is {}. She is {} years old.\".format(name, age)\n\"I have a dog and her name is {1}. She is {0} years old.\".format(name, age)\n\"I have a dog and her name is {dog_name}. She is {dog_age} years old.\".format(dog_age=age, dog_name=name)\n\nsentence = \"I have a dog and her name is {}. 
print(sentence)\nprint(sentence.format(name, age))\n\nasdf = \"peanut\" + \" butter\"\n\"peanut\" * 3\n\n\n# Python string -> iterable\nword = \"internationalization\"\nlen(word) # Maybe awkward for Rubyists at first\nword[5]\nword[-1]\nword[2:]\nword[2:6]\nfor char in word:\n    print(char)\n\n# Common operation\nword.replace(\"inter\", \"\")\nword.split(\"liza\")\nword.split(\"n\")\n\"$\".join(word.split(\"n\")) # Definitely awkward\n\n\"nation\" in word # True\n\"hotdog\" in word # False\n\nanother_word = \"hello, World\"\nanother_word.lower()\nanother_word.upper()\nanother_word.capitalize()\nanother_word.title()\n","repo_name":"thomaswhyyou/python_examples","sub_path":"strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"5770625418","text":"# Python Code to list countries region area wise.\r\n# Author: Charudatta Chousalkar\r\n# Run Script as >>python P2_WebScrapping.py\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\nweb_link = \"https://en.wikipedia.org/wiki/List_of_Asian_countries_by_area\"\r\nlink = requests.get(web_link).text\r\n#print(link)\r\n\r\n# Beautifying above link.\r\nsoup = BeautifulSoup(link, \"lxml\")\r\n#print(soup)\r\n\r\nprint(soup.title.string,\"\\n\") # to remove <title> tag\r\n\r\n# Identify right table first based on class\r\ncountry_table = soup.find('table', class_='wikitable sortable')\r\n#print(country_table)\r\n\r\ntable_links = country_table.find_all('a')\r\n#print(table_links)\r\n\r\n# Countries in list\r\ncountry = []\r\nfor links in table_links:\r\n\tcountry.append(links.get('title'))\r\n\r\n# Countries in DataFrame\r\ndf_country = pd.DataFrame()\r\ndf_country['Country'] = country\r\n\r\nprint(df_country)","repo_name":"Charudatta-Chousalkar/Python","sub_path":"P3_WebScraping.py","file_name":"P3_WebScraping.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39143986677","text":"from eldar import Query\nimport pandas as pd\n\n\n# build dataframe\ndf = pd.DataFrame([\n    \"Gandalf is a fictional character in Tolkien's The Lord of the Rings\",\n    \"Frodo is the main character in The Lord of the Rings\",\n    \"Ian McKellen interpreted Gandalf in Peter Jackson's movies\",\n    \"Elijah Wood was cast as Frodo Baggins in Jackson's adaptation\",\n    \"The Lord of the Rings is an epic fantasy novel by J. R. R.
Tolkien\"],\n columns=['content'])\n\n# build query object\nquery = Query(\n '(\"gandalf\" OR \"frodo\") AND NOT (\"movies\" OR \"adaptation\")',\n ignore_case=True,\n ignore_accent=True,\n match_word=True)\n\n# calling the object returns True if the text matches the query.\n# You can filter a dataframe using pandas mask syntax:\ndf = df[df.content.apply(query)]\nprint(df)\n","repo_name":"kerighan/eldar","sub_path":"tests/pandas_test.py","file_name":"pandas_test.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"94"} +{"seq_id":"16629079546","text":"\n\nfrom __future__ import print_function\nfrom argparse import ArgumentParser, Namespace\n\n\nTEMPLATE_SEPARATOR = '#####'\nOUTPUT_PREFIX = '''\n// This file is the result of rendering `{filepath}`.\n// You should make changes to this code by editing that template; not\n// this file.\n\n'''\n\n\ndef main(argv):\n args = parse_args(argv[1:])\n with open(args.output, 'w') as output:\n print(render(args.filepath, args.limit), file=output)\n return 0\n\n\ndef parse_args(raw_args):\n p = ArgumentParser(\n description=('Renders the specified template file with the given '\n 'arity limit. The template file should contain a line '\n 'containing just `{}`, with the template text above '\n 'that separator, and the context generation code '\n 'below. The code should define a `context` function '\n 'that generates a dict. The template text is then '\n 'rendered by: '\n '`text.format(limit=limit, **(context(limit))`')\n .format(TEMPLATE_SEPARATOR))\n p.add_argument('limit', type=int)\n p.add_argument('filepath', type=str)\n p.add_argument('-o', '--output', default='/dev/stdout',\n help='The path to the file to write the rendered template to.')\n return p.parse_args(raw_args)\n\n\ndef render(filepath, limit):\n text = read_file(filepath)\n template, code = text.split('\\n' + TEMPLATE_SEPARATOR + '\\n', 2)\n context_func = execute(code, filepath)['context']\n context = context_func(limit)\n return (OUTPUT_PREFIX.format(filepath=filepath)\n + template.format(limit=limit, **context))\n\n\ndef execute(code, filepath):\n code_locals = {}\n code_obj = compile(code, filepath, 'exec')\n exec(code_obj, {}, code_locals)\n return code_locals\n\n\ndef read_file(path):\n with open(path, 'r') as f:\n return f.read()\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n\n","repo_name":"mcinglis/libpp","sub_path":"templates/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"94"} +{"seq_id":"22533828608","text":"#!/usr/bin/env python\nimport click\nimport configupdater\nimport functools\nimport os\nimport pathlib\nimport re\nimport site\nimport sys\n\n# theonionbox.py -d --config filepath box --host 0.0.0.0 --port 8080 ... tor --host 0.0.0.0 --port ... proxy --host: ...\n\n#####\n# Version & Stamping\n# Yes, I know; this is a hack...\ntry:\n import stamp\nexcept ModuleNotFoundError:\n from . 
import stamp\n\nstamped_version = '{} (stamp {})'.format(stamp.__version__, stamp.__stamp__)\n\ndef configfile(*param_decls, **attrs):\n\n def _callback(callback, ctx, param, value):\n\n ctx.default_map = ctx.default_map or {}\n\n if value:\n\n try:\n updater = configupdater.ConfigUpdater()\n updater.read(value)\n except Exception as e:\n raise click.BadOptionUsage(param, \"Error reading configuration file: {}\".format(e), ctx)\n\n try:\n b = updater['TheOnionBox'].to_dict()\n except KeyError:\n b = {}\n\n try:\n t = updater['Tor'].to_dict()\n except KeyError:\n t = {}\n\n try:\n p = updater['TorProxy'].to_dict()\n except KeyError:\n p = {}\n\n cfg = {\n 'box': b,\n 'tor': t,\n 'proxy': p\n }\n\n ctx.default_map.update(cfg)\n\n return callback(ctx, param, value) if callback else value\n\n def decorator(f):\n\n attrs['is_eager'] = True\n saved_callback = attrs.pop('callback', None)\n partial_callback = functools.partial(_callback, saved_callback)\n attrs['callback'] = partial_callback\n return click.option(*param_decls, **attrs)(f)\n\n return decorator\n\n\n@click.group(chain=True, invoke_without_command=True)\n@click.option('-d', '--debug', is_flag=True, flag_value=True,\n help='Switch on DEBUG mode.')\n@click.option('-t', '--trace', is_flag=True, flag_value=True,\n help='Switch on TRACE mode (which is more verbose than DEBUG mode).')\n@click.option('-l', '--log', default=None, show_default=True,\n type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True,\n resolve_path=True, allow_dash=False),\n help='DIRECTORY to additionally emit log messages to. Please assure write privileges.')\n@configfile('-c', '--config', default=None, show_default=True,\n type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,\n resolve_path=True, allow_dash=False),\n help='Read configuration from FILE.')\n@click.option('-x', '--controlcenter', 'cc', default=None, show_default=True,\n type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True,\n resolve_path=True, allow_dash=False),\n help='Enable control center mode; controlled host data will be read from and stored in FILE.')\n@click.version_option(prog_name=f'{stamp.__title__}: {stamp.__description__}',\n version=stamped_version, message='%(prog)s\\nVersion %(version)s')\n@click.pass_context\ndef main(ctx, debug, trace, config, cc, log):\n # The Box will be launched by launcher() ... after all the subcommands returned.\n pass\n\n\n@main.resultcallback()\ndef launcher(results, debug, trace, config, cc, log):\n\n # This raises (by intension) if no context.\n ctx = click.get_current_context()\n\n params = {}\n for res in results:\n params.update(res)\n\n # Verify the parameters defined in the configuration file:\n for cmd in ['box', 'tor', 'proxy']:\n\n # If there was a config file, it's values have been loaded into the default_map\n dm = ctx.default_map.get(cmd, {})\n\n # This is a command\n func = globals()[cmd]\n\n # Get the interface = names of all valid parameters of our command\n func_params_name = [p.name for p in func.params]\n\n # Check if a parameter found in the config file...\n for d in dm:\n # ... 
is part of the interface:\n if d not in func_params_name:\n # If not, raise an error!\n ref = {\n 'box': 'TheOnionBox',\n 'tor': 'Tor',\n 'proxy': 'Proxy'\n }\n raise click.NoSuchOption(d, f'Invalid option in configuration file: [{ref[cmd]}] {d}')\n\n # After this validation,\n # we use the values found in the default_map to feed the commands that are not called via the command line\n if cmd not in params:\n # call the command with the values from the config file\n params.update(ctx.invoke(func, **dm))\n\n if params['proxy']['control'] == 'tor':\n transfer = ['control', 'host', 'port', 'socket']\n for item in transfer:\n params['proxy'][item] = params['tor'][item]\n else:\n check = ['host', 'port', 'socket']\n for item in check:\n if params['proxy'][item] == 'tor':\n params['proxy'][item] = params['tor'][item]\n\n params['debug'] = debug\n params['trace'] = trace\n params['config'] = config\n params['cc'] = cc\n params['log'] = log\n\n # Next We do all the stuff to prepare the environment to run a Box\n\n # Provide the demanded CurrentWorkingDirectory ... without truely changing to it via os.chdir !!\n # We resolve this Path, as __file__ might be relative, if __name__ == __main__.\n cwd = pathlib.Path(__file__).resolve()\n cwd = cwd.parent\n assert cwd.exists()\n params['cwd'] = cwd\n\n if __name__ == '__main__' or __package__ in [None, '']:\n # Add the current dir to the site-dirs, to allow ABSOLUTE import\n site.addsitedir(cwd)\n from tob.box import Box\n else:\n # we're in a package => RELATIVE should work.\n from .tob.box import Box\n\n tob = Box(params)\n tob.run()\n\n\n@main.command('box', short_help='Options to configure your Onion Box.')\n@click.option('--host', default='0.0.0.0', metavar='ADDRESS', show_default=True,\n help='IPv4 ADDRESS of your Onion Box, no PORT.')\n@click.option('--port', default='8080', metavar='PORT', show_default=True,\n help='Listening PORT of your Onion Box.')\n@click.option('--message_level', default='NOTICE', show_default=True,\n type=click.Choice(['DEBUG', 'INFO', 'NOTICE', 'WARNING', 'ERROR']),\n help='Error level verbosity.')\n@click.option('--base_path', default=None, metavar='PATH', show_default=True,\n help='Base path prefix.')\n#TODO: Verify documentation @ default config file: it says default is 300s! 
Questionable!\n@click.option('--session_ttl', default='30', metavar='SECONDS', show_default=True,\n help='Duration of session validity.')\n@click.option('--ssl_key', default=None, show_default=True,\n type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, allow_dash=False),\n help='Path to key file for SSL operations.')\n@click.option('--ssl_certificate', default=None, show_default=True,\n type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, allow_dash=False),\n help='Path to certificate file for SSL operations.')\n@click.option('--ntp_server', default=None, metavar='ADDRESS', show_default=True,\n help='ADDRESS of a NTP server.')\n@click.option('--geoip2_city', default=None,\n type=click.Path(exists=True, file_okay=True, dir_okay=False,\n readable=True, resolve_path=True, allow_dash=False),\n help='Path to supplemental GeoIP2 city database.')\n@click.option('--persistance_dir', default=None, show_default=True,\n type=click.Path(exists=True, file_okay=False, dir_okay=True,\n readable=True, writable=True, resolve_path=True, allow_dash=False),\n help='Path to store the database file for monitoring data persistance.')\n@click.pass_context\ndef box(ctx, host, port, message_level, base_path, session_ttl, ssl_key, ssl_certificate, ntp_server, geoip2_city,\n persistance_dir):\n \"\"\"Options to configure your Onion Box.\"\"\"\n\n if ssl_key is not None or ssl_certificate is not None:\n try:\n import ssl\n except ImportError:\n raise click.UsageError(\"SSL operations demand availability of Python module 'ssl'.\")\n\n if ssl_key is not None and ssl_certificate is None:\n raise click.BadOptionUsage(option_name='ssl_key',\n message=\"ssl_key FILE provided, but ssl_certificate FILE is missing.\")\n\n if ssl_key is None and ssl_certificate is not None:\n raise click.BadOptionUsage(option_name='ssl_certificate',\n message=\"ssl_certificate FILE provided, but ssl_key FILE is missing.\")\n\n if geoip2_city is not None:\n try:\n import geoip2\n except ImportError:\n raise click.BadOptionUsage(\n option_name='geoip2_city',\n message=\"Usage of the GeoIP2 City database demands availability of Python module 'geoip2'.\")\n\n if base_path is None:\n base_path = ''\n\n return {'box': {\n 'host': host,\n 'port': port,\n 'message_level': message_level,\n 'base_path': base_path,\n 'session_ttl': session_ttl,\n 'ssl_key': ssl_key,\n 'ssl_certificate': ssl_certificate,\n 'ntp_server': ntp_server,\n 'geoip2_city': geoip2_city,\n 'persistance_dir': persistance_dir\n }}\n\n@main.command('tor', short_help='Configure the connection to the Tor node to be monitored.')\n@click.option('--control', default='port', show_default=True,\n type=click.Choice(['port', 'proxy', 'socket']),\n help='Mode to establish a controlling connection.')\n@click.option('--host', default='127.0.0.1', metavar='ADDRESS', show_default=True,\n help='[IPv4 | URL | .onion] ADDRESS of this Tor node, no PORT.')\n@click.option('--port', default='auto', metavar='PORT', show_default=True,\n help='ControlPort of this Tor node.')\n@click.option('--socket', default=None, metavar='SOCKET', show_default=True,\n help='Local ControlSocket of the Tor node.')\n@click.option('--auth_cookie', default=None, metavar='COOKIE', show_default=True,\n help='Cookie necessary to support HiddenServiceAuthorizeClient.')\n@click.option('--password', default=None, metavar='PASSWORD', show_default=True,\n help='Password, necessary if this Tor node is guarded with a HashedControlPassword.')\n@click.pass_context\ndef tor(ctx, control, host, 
port, socket, auth_cookie, password):\n \"\"\"Settings to configure the connection to the Tor node to be monitored.\"\"\"\n\n if control in ['port', 'proxy']:\n if host is None or port is None:\n raise click.BadOptionUsage(\n option_name='control',\n message=f\"--control mode '{control}' requires --host and --port to be defined as well.\")\n elif control == 'socket':\n if socket is None:\n raise click.BadOptionUsage(option_name='control',\n message=\"--control mode 'socket' requires --socket to be defined as well.\")\n\n if auth_cookie is not None:\n check = re.match('^[a-zA-Z0-9+/]{22}$', auth_cookie) # see Tor(1), HidServAuth\n if check is None:\n raise click.BadParameter(param_hint='--auth_cookie',\n message=\"Parameter provided is not a Tor Authorization Cookie.\")\n\n return {'tor': {\n 'control': control,\n 'host': host,\n 'port': port,\n 'socket': socket,\n 'cookie': auth_cookie,\n 'password': password,\n 'label': None, # some additional properties, demanded by the cc\n 'connect': True\n }}\n\n\n@main.command('proxy', short_help='Configure the connection to a Tor node acting as proxy.')\n@click.option('--control', default='tor', show_default=True,\n type=click.Choice(['tor', 'port', 'socket']),\n help='Mode to establish a controlling connection.')\n@click.option('--host', default=None, metavar='ADDRESS', show_default=True,\n help='[IPv4 | URL] ADDRESS of the Tor node, no PORT.')\n@click.option('--port', default=None, metavar='PORT', show_default=True,\n help='ControlPort of the Tor node.')\n@click.option('--socket', default=None, metavar='SOCKET', show_default=True,\n help='Local ControlSocket of the Tor node.')\n@click.option('--proxy', default='auto', metavar='PORT', show_default=True,\n help='SocksPort of the Tor node.')\n@click.pass_context\ndef proxy(ctx, control, host, port, socket, proxy):\n \"\"\"Settings to configure the connection to a Tor node acting as proxy.\"\"\"\n\n if control == 'port':\n if host is None or port is None:\n raise click.BadOptionUsage(\n option_name='control',\n message=f\"--control mode '{control}' requires --host and --port to be defined as well.\")\n elif control == 'socket':\n if socket is None:\n raise click.BadOptionUsage(option_name='control',\n message=\"--control mode 'socket' requires --socket to be defined as well.\")\n\n return {'proxy': {\n 'control': control,\n 'host': host,\n 'port': port,\n 'socket': socket,\n 'proxy': proxy\n }}\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ralphwetzel/theonionbox","sub_path":"theonionbox/theonionbox.py","file_name":"theonionbox.py","file_ext":"py","file_size_in_byte":13575,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"94"} +{"seq_id":"28961841848","text":"from django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\n\nfrom blog.models import Article, Tag, Category\n\nUser = get_user_model()\n\n\nclass Command(BaseCommand):\n \"\"\" run python manage.py create_fake_data\n 创建用于测试的假数据\n \"\"\"\n admin_username = 'fake_admin'\n admin_password = 'fake_admin'\n admin_email = 'fake_admin@qq.com'\n help = 'Create some fake data for display'\n\n def add_arguments(self, parser):\n # 文档地址: https://docs.python.org/zh-cn/3/library/argparse.html#action\n parser.add_argument(\n # run manage.py clear_migrations --count 20\n # 选项以-前缀开头, 位置参数就直接写参数名称\n '-c',\n '--count',\n action='store', # 默认值就是为store, 存储变量 count=20\n default=20,\n type=int,\n help='length of article_data (default 20)'\n )\n\n def handle(self, *args, 
**options):\n # get_or_create 没有便创建, 返回一个元组 (obj, created)\n user = User.objects.get_or_create(username=self.admin_username, email=self.admin_email, is_staff=True,\n is_superuser=True)[0]\n user.set_password(self.admin_password)\n user.save()\n\n parent_category = Category.objects.get_or_create(name='python学习', parent_category=None)[0]\n\n sub_category = Category.objects.get_or_create(name='django学习', parent_category=parent_category)[0]\n\n base_tag = Tag.objects.get_or_create(name='Django')[0]\n\n # 创建20篇测试文章\n count = options['count']\n for i in range(1, count + 1):\n article = Article.objects.get_or_create(\n category=sub_category,\n title='我是测试标题 ' + str(i),\n content='我是测试内容 ' + str(i),\n author=user,\n )[0]\n tag = Tag.objects.get_or_create(name='标签' + str(i))[0]\n\n article.tags.add(tag)\n article.tags.add(base_tag)\n article.save()\n\n # 创建文章\n article = Article.objects.get_or_create(\n category=sub_category,\n title='彬彬博客',\n author=user,\n content=\n \"\"\"\n### 支持Markdown\n\n```python\nprint('支持语法高亮')\n```\n \"\"\",\n )[0]\n article.tags.add(base_tag)\n article.save()\n\n self.stdout.write('\\nCreated a superuser (fake_admin fake_admin).')\n self.stdout.write('\\nData creation succeeded.')\n","repo_name":"enjoy-binbin/Django-blog","sub_path":"blog/management/commands/create_fake_data.py","file_name":"create_fake_data.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"94"} +{"seq_id":"29828245069","text":"import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.morphology import skeletonize, medial_axis\n\nimg = cv.imread('images/circles.png', 0)\n\n#Rendre l'image en binaire\nret, binary_image = cv.threshold(img, 127, 255, cv.THRESH_BINARY)\nbinary_image = binary_image.astype(np.uint8) // 255\n\n#Inversion de l'image binaire pour la squelettisation\nbinary_image = 1 - binary_image\n\n#Squelette d4\nskeleton_d4 = skeletonize(binary_image)\n\n#Squelette d8\nskeleton_d8 = medial_axis(binary_image)\n\nplt.figure(figsize=(10, 4))\nplt.subplot(131)\nplt.imshow(img, cmap='gray')\nplt.title('Originale')\nplt.axis('off')\nplt.subplot(132)\nplt.imshow(skeleton_d4, cmap='gray')\nplt.title('Squelette d4')\nplt.axis('off')\nplt.subplot(133)\nplt.imshow(skeleton_d8, cmap='gray')\nplt.title('Squelette d8')\nplt.axis('off')\nplt.show()\n\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"Imad-Allal/Image-Processing","sub_path":"morphological_operators/skeletons.py","file_name":"skeletons.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"17523060571","text":"from sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\n\nclass Nosampler(BaseEstimator, TransformerMixin):\n '''\n The nosampler class do not do any type of sampling. It shall be used to compare with common over, under and\n combined samplers\n\n '''\n\n #def __init__(self):\n #self.cols = cols\n\n def transform(self, X):\n return X\n\n def fit(self, X, y=None):\n return self\n\nclass ColumnExtractor(BaseEstimator, TransformerMixin):\n '''\n Column extractor method to extract selected columns from a list. This is used as a feature selector. 
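For example, ColumnExtractor(cols=[0, 2]).transform(X) simply returns X[:, [0, 2]].\n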
Source\n https://stackoverflow.com/questions/25250654/how-can-i-use-a-custom-feature-selection-function-in-scikit-learns-pipeline,\n http://danielhnyk.cz/creating-your-own-estimator-scikit-learn/.\n\n '''\n\n def __init__(self, cols=[0]):\n self.cols = cols\n\n def transform(self, X):\n col_list = []\n if self.cols is not None:\n return X[:,self.cols]\n else:\n return X\n #for c in self.cols:\n # col_list.append(X[:, c:c+1])\n #return np.concatenate(col_list, axis=1)\n #return X[self.cols].values\n\n def fit(self, X, y=None):\n return self\n\ndef extract_data_subset(X_train, y_train, number_of_samples, shuffled=True):\n '''\n Extract subset of a dataset with X and y. The subset size is set and if the data shall be shuffled\n\n '''\n\n print(\"Original size X: \", X_train.shape)\n print(\"Original size y: \", y_train.shape)\n if number_of_samples < X_train.shape[0]:\n print(\"Quota of samples used in the optimization: {0:.2f}\".format(number_of_samples / X_train.shape[0]))\n _, X_train_subset, _, y_train_subset = train_test_split(X_train, y_train, random_state=0,\n test_size=number_of_samples / X_train.shape[0],\n shuffle=shuffled, stratify=y_train)\n else:\n X_train_subset = X_train\n y_train_subset = y_train\n\n print(\"Subset size X: \", X_train_subset.shape)\n print(\"Subset size y: \", y_train_subset.shape)\n\n a, b = np.unique(y_train_subset, return_counts=True)\n print(\"Classes {}, counts {}\".format(a, b))\n if len(a)<2:\n raise Exception(\"Only one class\")\n\n\n return X_train_subset, y_train_subset\n\ndef generate_result_table(gridsearch_run, params_run, refit_scorer_name):\n '''\n Generate a result table from a sklearn model run.\n gridsearch_run: the run\n params_run: parameters for the grid search\n refit_scorer_name: refit scorer name\n\n '''\n\n #merge all parameters to get the keys of all runs\n merged_params = {}\n # Check if parameters are a list or a dict. If they are a list of dicts, they have to be merged to get all\n # values within a table. Else, nothing has to be done.\n if isinstance(params_run, list):\n for d in params_run:\n merged_params.update(d)\n else:\n merged_params = params_run\n\n\n if isinstance(merged_params, list):\n table_parameters = ['param_' + x for x in merged_params[0].keys()]\n else:\n table_parameters = ['param_' + x for x in merged_params.keys()]\n\n metric_parameters = ['mean_test_' + refit_scorer_name, 'std_test_' + refit_scorer_name]\n result_columns = metric_parameters\n result_columns.extend(table_parameters)\n\n results = pd.DataFrame(gridsearch_run.cv_results_)\n results = results.sort_values(by='mean_test_' + refit_scorer_name, ascending=False)\n results[result_columns].round(3).head(20)\n\n return results[result_columns]\n\n\ndef create_parameter_grid_for_svm(results, top_results=None):\n '''Get the top x results from the result list and create a parameter list from it\n\n :args:\n results: Result table\n top_results: Pick the top results to create a new list of parameters for the random search. 
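For instance, top_results=3 keeps only the three best (svm__C, svm__gamma) pairs.\n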
Default=None\n means that all values are picked.\n\n :return:\n params_new: New parameter list from the result values\n '''\n\n if top_results is None:\n top_results = results.shape[0]\n\n params_new = []\n for i in range(top_results):\n new_dict = {}\n new_dict['svm__C'] = [results.iloc[i]['param_svm__C']]\n new_dict['svm__gamma'] = [results.iloc[i]['param_svm__gamma']]\n params_new.append(new_dict)\n\n return params_new\n\ndef reduce_classes(y_classes, y_val, y_val_pred):\n '''\n Prepare the class list of classes, in case a class does not exist in the y_true and y_pred, which exist in y_train\n\n '''\n\n a = np.unique(y_val)\n b = np.unique(y_val_pred)\n c = np.hstack((a, b))\n unique_values = np.unique(c)\n existing_classes = []\n [existing_classes.append(y_classes[i]) for i in unique_values]\n print(\"Original classes: {}\".format(y_classes))\n print(\"Remaining classes to use: {}\".format(existing_classes))\n\n reduced_class_dict = dict(zip(c, existing_classes))\n\n return reduced_class_dict\n\ndef adjusted_classes(y_scores, t):\n \"\"\"\n This function adjusts class predictions based on the prediction threshold (t).\n Will only work for binary classification problems.\n\n # Create adjusted precision-recall curves\n # https://towardsdatascience.com/fine-tuning-a-classifier-in-scikit-learn-66e048c21e65\n \"\"\"\n return [1 if y >= t else 0 for y in y_scores]","repo_name":"alexanderwendt/sklearn_ml_toolbox","sub_path":"utils/sklearn_utils.py","file_name":"sklearn_utils.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"39161070944","text":"# This file contains code on live Traffic Detection of a video file\n\nimport cv2\n\n# Enter the path of video links below : \nvideo = cv2.VideoCapture('Videos/Autopilot_Dashcam.mp4')\n# video = cv2.VideoCapture('Videos/Demo_Vid.mp4')\n# video = cv2.VideoCapture('Videos/Vid_3_1.mp4')\n# video = cv2.VideoCapture('Videos/Pedestrians_1.mp4')\n\ncar_tracker_file = 'Classifier/car_detector.xml'\npedestrian_tracker_file = 'Classifier/pedestrian_detector.xml'\n\ncar_tracker = cv2.CascadeClassifier(car_tracker_file)\npedestrian_tracker = cv2.CascadeClassifier(pedestrian_tracker_file)\n\nwhile True:\n read_successful, frame = video.read()\n\n #safe coding \n\n if read_successful:\n grayscaled_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n else:\n break\n\n cars = car_tracker.detectMultiScale(grayscaled_frame)\n pedestrians = pedestrian_tracker.detectMultiScale(grayscaled_frame)\n\n\n for (x, y, w, h) in cars:\n cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 0, 255),2)\n \n for (x, y, w, h) in pedestrians:\n cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 255, 255),2)\n\n\n cv2.imshow('Video',frame)\n\n key = cv2.waitKey(1)\n\n if key==81 or key==113:\n break\n\nvideo.release()\n","repo_name":"SagarBapodara/Real-Time-Pedestrian-and-Vehicle-Detection-using-Computer-Vision","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"36380969446","text":"'''\nCreated on Oct, 2019\n\n@author: hugo\n\n'''\nimport os\nimport json\nimport argparse\n\nfrom ..core.utils.io_utils import *\n\n\ndef fetch_meta(path):\n try:\n data = load_gzip_json(path)\n except:\n return {}\n content = {}\n properties = data['property']\n if '/type/object/name' in properties:\n content['name'] = [x['value'] for x in 
properties['/type/object/name']['values']]\n else:\n content['name'] = []\n if '/common/topic/alias' in properties:\n content['alias'] = [x['value'] for x in properties['/common/topic/alias']['values']]\n else:\n content['alias'] = []\n if '/common/topic/notable_types' in properties:\n content['notable_types'] = [x['id'] for x in properties['/common/topic/notable_types']['values']]\n else:\n content['notable_types'] = []\n if '/type/object/type' in properties:\n content['type'] = [x['id'] for x in properties['/type/object/type']['values']]\n else:\n content['type'] = []\n return content\n\ndef fetch(data, data_dir):\n if not 'id' in data:\n return data['value']\n mid = data['id']\n # meta data might not be in the subgraph, get it from target files\n meta = fetch_meta(os.path.join(data_dir, '{}.json.gz'.format(mid.strip('/').replace('/', '.'))))\n if meta == {}:\n if not 'property' in data:\n if 'text' in data:\n return data['text']\n else:\n import pdb;pdb.set_trace()\n properties = data['property']\n if '/type/object/name' in properties:\n meta['name'] = [x['value'] for x in properties['/type/object/name']['values']]\n else:\n meta['name'] = []\n if '/common/topic/alias' in properties:\n meta['alias'] = [x['value'] for x in properties['/common/topic/alias']['values']]\n else:\n meta['alias'] = []\n if '/common/topic/notable_types' in properties:\n meta['notable_types'] = [x['id'] for x in properties['/common/topic/notable_types']['values']]\n else:\n meta['notable_types'] = []\n if '/type/object/type' in properties:\n meta['type'] = [x['id'] for x in properties['/type/object/type']['values']]\n else:\n meta['type'] = []\n graph = {mid: meta}\n if not 'property' in data: # we stop at the 2nd hop\n return graph\n properties = data['property']\n neighbors = {}\n for k, v in properties.items():\n if k.startswith('/common') or k.startswith('/type') \\\n or k.startswith('/freebase') or k.startswith('/user') \\\n or k.startswith('/imdb'):\n continue\n if len(v['values']) > 0:\n neighbors[k] = []\n for nbr in v['values']:\n nbr_graph = fetch(nbr, data_dir)\n neighbors[k].append(nbr_graph)\n graph[mid]['neighbors'] = neighbors\n return graph\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-data_dir', '--data_dir', required=True, type=str, help='path to the data dir')\n parser.add_argument('-fbkeys', '--freebase_keys', required=True, type=str, help='path to the freebase key file')\n parser.add_argument('-out_dir', '--out_dir', type=str, required=True, help='path to the output dir')\n args = parser.parse_args()\n\n ids = load_json(args.freebase_keys)\n total = len(ids)\n print('Fetching {} entities and their 2-hop neighbors.'.format(total))\n print_bar_len = 50\n cnt = 0\n missing_ids = set()\n with open(os.path.join(args.out_dir, 'freebase.json'), 'a') as out_f:\n for id_ in ids:\n try:\n data = load_gzip_json(os.path.join(args.data_dir, '{}.json.gz'.format(id_)))\n except:\n missing_ids.add(id_)\n continue\n graph = fetch(data, args.data_dir)\n graph2 = {id_: list(graph.values())[0]}\n graph2[id_]['id'] = list(graph.keys())[0]\n line = json.dumps(graph2) + '\\n'\n out_f.write(line)\n cnt += 1\n if cnt % int(total / print_bar_len) == 0:\n printProgressBar(cnt, total, prefix='Progress:', suffix='Complete', length=print_bar_len)\n printProgressBar(cnt, total, prefix='Progress:', suffix='Complete', length=print_bar_len)\n\n print('Missed %s mids' % len(missing_ids))\n # dump_json(list(missing_ids), os.path.join(args.out_dir, 
'missing_fbids.json'))\n","repo_name":"hugochan/Graph2Seq-for-KGQG","sub_path":"src/scripts/prepare_freebase_for_webquestions.py","file_name":"prepare_freebase_for_webquestions.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"94"} +{"seq_id":"3572534957","text":"import requests,json,re,csv,os,subprocess,urllib2\nfrom pprint import pprint\nfrom os.path import expanduser\nfrom urllib2 import Request, urlopen\nimport dateutil.parser as parser\nimport ee\nee.Initialize()\n\ndef replicate(idl):\n with open(idl,'r') as f:\n reader = csv.DictReader(f)\n for i, line in enumerate(reader):\n destype = line['type']\n source = line['path']\n dest_root = str(ee.data.getAssetRoots()[0]['id'])\n sourc_root=\"users/\"+source.split('/')[1]\n print(\"Destination Source: \"+dest_root,\"Source root: \"+sourc_root)\n destination=source.replace(sourc_root,dest_root)\n print(\"Final Destination: \"+destination)\n print('')\n if destype== \"Folder\" and ee.data.getInfo(destination) is not None:\n print(\"Folder Exists\")\n elif destype== \"Folder\" and ee.data.getInfo(destination) is None:\n folder=dest_root+'/'+str(source.replace(sourc_root,'')).split('/')[-1]\n print(folder)\n ee.data.createAsset({'type': ee.data.ASSET_TYPE_FOLDER}, folder)\n elif destype==\"ImageCollection\" and ee.data.getInfo(destination) is not None:\n print('ImageCollection Exists')\n elif destype==\"ImageCollection\" and ee.data.getInfo(destination) is None:\n folder=str(source.replace(sourc_root,'')).split('/')[-1]\n ee.data.createAsset({'type': ee.data.ASSET_TYPE_IMAGE_COLL}, destination)\n#replicate(idl=r'C:\\Users\\samapriya\\Desktop\\gee-copy\\eerep.csv')\n","repo_name":"samapriya/gee-takeout","sub_path":"geetakeout/gee_replicate.py","file_name":"gee_replicate.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"94"} +{"seq_id":"2159308490","text":"import json\nfrom Funciones import *\nwith open(\"DarkSoulsWeapons.json\") as fichero:\n datos=json.load(fichero)\n\nprint ('''\n1. Listar todas las armas.\n2. Contar las armas que hay.\n3. Mostrar armas que empiecen por D.\n4. Mostrar el número de ataque y que muestre el nombre de las armas relacionadas.\n5. Muestra el daño mágico de las armas.\n0. 
Salir.''')\n\nopcion=int(input(\"Selecciona una opción: \"))\n\nwhile opcion != 0:\n    if opcion == 1:\n        lista=todas_las_armas(datos)\n        for i in lista:\n            print (i)\n    elif opcion == 2:\n        lista=contar_las_armas(datos)\n        print (lista)\n    elif opcion == 3:\n        lista=arma_por_d(datos)\n        check=\"D\"\n        res = [idx for idx in lista if idx[0].lower() == check.lower()]\n        for i in res:\n            print (f\"\\n-nombre: {i}\")\n    elif opcion == 4:\n        num=int(input(\"Dime el número de ataque\"))\n        lista=arma_damage(datos,num)\n        for i in lista:\n            print (f\"\\n-nombre: {i[0]}\\n -ataque físico: {i[1]}\")\n    elif opcion == 5:\n        num=int(input(\"Dime el número de ataque mágico\"))\n        lista=arma_damage(datos,num)\n        for i in lista:\n            print (f\"\\n-nombre: {i[0]}\\n -ataque mágico: {i[1]}\")\n    opcion=int(input(\"Selecciona una opción: \"))\n","repo_name":"Evanticks/JSON","sub_path":"Programa_principal.py","file_name":"Programa_principal.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"30439574387","text":"# Write a function that accepts a string from the user and prints all permutations of that string\ndef permutations(data, index=0):\n    if index == len(data):\n        print(\"\".join(data))\n    else:\n        for i in range(index, len(data)):\n            data[index], data[i] = data[i], data[index]\n            permutations(data, index + 1)\n            data[index], data[i] = data[i], data[index]\n\nstring = input(\"Enter a string: \")\npermutations(list(string))","repo_name":"erapredator/pp2_KaliyevYerlan","sub_path":"lab3/function1/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74640308150","text":"\"\"\"\nFirst example - solving LLS problem - no classes created\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nnp.random.seed(315054)\n\nNp = 10 # rows\nNf = 5 # features\n\nA = np.random.randn(Np, Nf)\nw = np.random.randn(Nf)\ny = A@w\n\n# %% Evaluate w hat\n# Apply the linear least squares method\nATA = A.T@A # Generate A^T * A\nATA_inv = np.linalg.inv(ATA) # Inverse of A^T * A\nATy = A.T@y # Generate A^T * y\n\nw_hat = ATA_inv@ATy\n\n# %% Check result\ne = y - A@w_hat\n\nerrsqnorm = np.linalg.norm(e)**2\n\nprint(\"Error square norm: \", errsqnorm)\n\n# %%\n\n\nplt.figure() # Create the figure\nplt.plot(w_hat, label='w_hat') # w_hat will be plotted as a line\nplt.plot(w, 'o', label='w') # w will be plotted with 'o's as markers\nplt.xlabel('n')\nplt.ylabel('w(n)')\nplt.legend()\nplt.grid()\nplt.title(\"Comparison between w and w_hat\")\nplt.show()\n","repo_name":"davmacario/ICTforHealth","sub_path":"lab00/example01.py","file_name":"example01.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20020900416","text":"from collections import defaultdict\nclass Solution:\n\n    def findOrder(self, numCourses, prerequisites):\n\n        adj_list = defaultdict(list)  # defaultdict(list) avoids a KeyError for missing keys\n\n        indegree = {}\n        for dest, src in prerequisites:\n            adj_list[src].append(dest)\n            indegree[dest] = indegree.get(dest, 0) + 1  # dict.get() returns the value for dest, or the default 0 if absent\n\n        # collect the nodes whose in-degree is 0\n        zero_indegree_queue = [k for k in range(numCourses) if k not in indegree.keys()]\n\n        topological_sorted_order = []\n\n        # Until there are nodes in the Q\n        while zero_indegree_queue:\n\n            # Pop one node with 0 in-degree\n            vertex = zero_indegree_queue.pop(0)\n
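            # Nodes enter this list in the order they reach zero in-degree, so the list itself is a valid topological order; e.g. for prerequisites [[1,0],[2,0],[3,1],[3,2]] the queue starts as [0].\n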
topological_sorted_order.append(vertex)\n\n            # Reduce in-degree for all the neighbors\n            if vertex in adj_list:\n                for neighbor in adj_list[vertex]:\n                    indegree[neighbor] -= 1\n\n                    # Add neighbor to Q if in-degree becomes 0\n                    if indegree[neighbor] == 0:\n                        zero_indegree_queue.append(neighbor)\n\n        return topological_sorted_order if len(topological_sorted_order) == numCourses else []\n\nif __name__ == \"__main__\":\n    solution = Solution()\n    prerequisites = [[1,0],[2,0],[3,1],[3,2]]\n    print(solution.findOrder(4, prerequisites))","repo_name":"daiyifan1995/leetcode","sub_path":"Course Schedule2.py","file_name":"Course Schedule2.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21672173141","text":"class BridgesAdjacencyList:\n\tdef __init__(self,n=None,graph=None):\n\t\tself.n=n\n\t\tself.id=0\n\t\tself.low=[float('inf')]*(self.n)\n\t\tself.ids=[float('inf')]*(self.n)\n\t\tself.solved=False\n\t\tself.visited=[False]*(self.n)\n\t\tself.graph=self.buildGraph(graph)\n\t\tself.bridges=[]\n\n\tdef buildGraph(self,graph):\n\t\t# one empty bucket per node ([[]]*n would alias a single list)\n\t\tarr=[[] for _ in range(self.n)]\n\t\tfor i in graph:\n\t\t\tx,y=i[0],i[1]\n\t\t\tarr[x].append(y)\n\t\t\tarr[y].append(x)\n\t\treturn(arr)\n\n\tdef bridgesAdjList(self,graph,n):\n\t\tassert(graph!=None)\n\t\tassert(n>0)\n\t\tassert(len(graph)!=n)\n\t\tself.graph=graph\n\t\tself.n=n\n\n\tdef findBridges(self):\n\t\t\"\"\"\n\t\tReturns a list of node pairs that form bridges\n\t\t\"\"\"\n\t\tif(self.solved):\n\t\t\treturn(self.bridges)\n\t\tfor i in range(self.n):\n\t\t\tif(self.visited[i]==False):\n\t\t\t\tself.dfs(i,-1)\n\t\tself.solved = True\n\t\treturn(self.bridges)\n\n\tdef dfs(self,at,parent):\n\t\tself.visited[at]=True\n\t\tself.low[at] = self.ids[at] = self.id\n\t\tself.id+=1\n\n\t\tfor v in self.graph[at]:\n\t\t\tif(v==parent):\n\t\t\t\tcontinue\n\t\t\tif(self.visited[v]==False):\n\t\t\t\tself.dfs(v,at)\n\t\t\t\tself.low[at] = min(self.low[at],self.low[v])\n\t\t\t\tif(self.ids[at]<self.low[v]):\n\t\t\t\t\tself.bridges.append([at,v])\n\t\t\telse:\n\t\t\t\tself.low[at] = min(self.low[at],self.low[v])\n\narr=\t[\n\t\t\t[0,1],\n\t\t\t[0,2],\n\t\t\t[1,2],\n\t\t\t[2,3],\n\t\t\t[3,4],\n\t\t\t[2,5],\n\t\t\t[5,6],\n\t\t\t[6,7],\n\t\t\t[7,8],\n\t\t\t[8,5]\n\t\t]\nsolver = BridgesAdjacencyList(graph=arr,n=9)\nbridges = solver.findBridges()\nfor n1,n2 in bridges:\n\tprint(\"bridge between nodes\",n1,\"and\",n2)\n\nprint(bridges)\n","repo_name":"AswinTekur/graphs_Py3","sub_path":"graphOptimizations/findBridges.py","file_name":"findBridges.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70822403829","text":"\"\"\"\nUtility functions for mathematical operations\n\"\"\"\nfrom itertools import combinations\nfrom numpy import (append, ones,\n                   diag, sqrt, where, zeros)\nfrom numpy.linalg import norm\n\n\ndef add_constant(numpy_array):\n    numpy_array_with_constant = append(ones(numpy_array.shape[0]).reshape(\n        numpy_array.shape[0], 1), numpy_array, 1)\n    return numpy_array_with_constant\n\n\ndef get_distance_matrix(R, distance_func):\n    \"\"\"\n    Computes a symmetric distance matrix\n    :param R: A numpy array of features
of dimension\n nfeatures x nobs\n :param distance_func: A function that calculates the\n distance between two vectors\n :returns A symmetric nfeatures x nfeatures matrix\n \"\"\"\n nrows = R.shape[0]\n distance_matrix = zeros(nrows * nrows).reshape(nrows, nrows)\n for c in combinations(range(0, len(R)), 2):\n distance_matrix[c] = distance_func(R[c[0]], R[c[1]])\n distance_matrix = distance_matrix.T + (\n distance_matrix - diag(diag(distance_matrix)))\n return distance_matrix\n\n\n\"\"\"\nDistance Functions\n\"\"\"\n\n\ndef euclidean(v1, v2):\n return sqrt(norm(v1)**2 + norm(v2)**2 - 2*v1.dot(v2))\n\n\ndef jaccard(v1, v2):\n \"\"\"\n Distance metric\n 1 - (number same items) / (items in either)\n \"\"\"\n v1_indices = where(v1 == 1)[0]\n v2_indices = where(v2 == 1)[0]\n nboth = len(set(v1_indices).intersection(v2_indices))\n nneither = len(set(v1_indices).union(v2_indices))\n return 1 - (nboth)/(nneither)\n","repo_name":"dlosardo/machine_learning","sub_path":"machine_learning/utils/math_utils.py","file_name":"math_utils.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"3902950483","text":"import os\nfrom gi.repository import Gtk\nimport gettext\nimport gpgmessages\nimport gpgplatform\n\n# Init translations\nos.environ.setdefault(\"LANG\", \"en\")\ngettext.install(\"gnupg-scripts\", unicode=1)\n\nclass GpgConf(object):\n \"\"\" Class for handling GnuPG's own settings (gpg.conf) \"\"\"\n def __init__(self, parent=None):\n \"\"\" Read gpg.conf and set variables accordingly \"\"\"\n self.gpgconf = os.path.join(gpgplatform.gnupg_home(parent), u\"gpg.conf\")\n self.parent = parent\n self.use_embedded_filename = True\n self.defaultkey = \"\"\n self.groups = []\n self.conflist = []\n self.read_config()\n\n def read_config(self): \n try:\n fr = open(self.gpgconf, \"r\")\n for line in fr.readlines():\n line = line.lstrip()\n if not line:\n line = \"\\n\"\n if line.startswith(\"group\") and line[5].isspace():\n grpdef = line.split(None, 1)\n if len(grpdef) > 1 and grpdef[1].find(\"=\") > 0:\n grpdef = grpdef[1].split(\"=\", 1)\n grpnam = grpdef[0].strip()\n grplst = grpdef[1].split()\n self.groups.append([ grpnam, grplst ])\n elif line.strip() == \"use-embedded-filename\":\n self.use_embedded_filename = True\n elif line.startswith(\"default-key\") and line[11].isspace():\n keydef = line.split()\n if len(keydef) > 1:\n self.defaultkey = keydef[1]\n else:\n self.conflist.append(line)\n fr.close()\n except IOError:\n pass\n\n\n def write_config(self):\n \"\"\" Write back gpg.conf with current settings \"\"\"\n try:\n fw = open(self.gpgconf, \"w\")\n for line in self.conflist:\n fw.write(line)\n for grp in self.groups:\n n = len(grp[1])\n if n > 0:\n line = \"group \"\n line += grp[0]\n line += \"=\"\n for key in grp[1]:\n line += key\n n -= 1\n if n:\n line += \" \"\n line += \"\\n\"\n fw.write(line)\n if self.use_embedded_filename:\n fw.write(\"use-embedded-filename\\n\")\n if self.defaultkey:\n fw.write(\"default-key \" + self.defaultkey + \"\\n\")\n fw.close()\n return True\n except IOError:\n msg = _(\"Error writing GnuPG configuration file %s!\") % self.gpgconf.decode('UTF-8', 'replace')\n gpgmessages.displayMessage(msg, _(\"Error\"), parent=self.parent)\n return False\n\n\n\nclass SrcConf(object):\n \"\"\" Class for handling settings of GnuPG-Scripts (gnupg-scripts.conf) \"\"\"\n def __init__(self, parent=None):\n \"\"\" Read gnupg-scripts.conf and set variables accordingly \"\"\"\n 
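        # Attribute defaults below mirror the documented defaults; any value parsed from gnupg-scripts.conf further down overrides them.\n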
self.srcconf = os.path.join(gpgplatform.gnupg_home(parent),\n u\"gnupg-scripts.conf\")\n self.parent = parent\n self.pack = \"ask\"\n self.unpack = \"ask\"\n self.archive = \".zip\"\n self.packform = \"%y%m%d\"\n self.removeorig = False\n self.removepacked = False\n self.removeunpacked = False\n self.enctoself = True\n self.unsigned_keys = True\n try:\n fr = open(self.srcconf, \"r\")\n for line in fr.readlines():\n line = line.strip()\n if line.startswith(\"GNUPG_SCRIPTS_ENCRYPT_TO_SELF\"):\n keyval = line.split(\"=\")\n if len(keyval) > 1 and keyval[1].strip().lower() == \"no\":\n self.enctoself = False\n elif line.startswith(\"GNUPG_SCRIPTS_PACK\"):\n keyval = line.split(\"=\")\n if len(keyval) > 1:\n self.pack = keyval[1].strip().lower()\n elif line.startswith(\"GNUPG_SCRIPTS_UNPACK\"):\n keyval = line.split(\"=\")\n if len(keyval) > 1:\n self.unpack = keyval[1].strip().lower()\n elif line.startswith(\"GNUPG_SCRIPTS_ARCHIVE_FORMAT\"):\n keyval = line.split(\"=\")\n if len(keyval) > 1:\n self.archive = keyval[1].strip()\n if (self.archive != \".zip\" and\n self.archive != \".tar.gz\" and\n self.archive != \".tar.bz2\"):\n self.archive = \".zip\"\n elif (line.startswith(\"GNUPG_SCRIPTS_ZIP_NAME_FORMAT\") or\n line.startswith(\"GNUPG_SCRIPTS_ARCHIVE_NAME_FORMAT\")):\n keyval = line.split(\"=\")\n if len(keyval) > 1:\n self.packform = keyval[1].strip(\"\\\"\\t \")\n elif (line.startswith(\"GNUPG_SCRIPTS_REMOVE_ZIP_FILE\") or\n line.startswith(\"GNUPG_SCRIPTS_REMOVE_PACKED_FILE\")):\n keyval = line.split(\"=\")\n if len(keyval) > 1 and keyval[1].strip().lower() == \"yes\":\n self.removepacked = True\n elif (line.startswith(\"GNUPG_SCRIPTS_REMOVE_UNZIPPED_FILE\") or\n line.startswith(\"GNUPG_SCRIPTS_REMOVE_UNPACKED_FILE\")):\n keyval = line.split(\"=\")\n if len(keyval) > 1 and keyval[1].strip().lower() == \"yes\":\n self.removeunpacked = True\n elif line.startswith(\"GNUPG_SCRIPTS_REMOVE_ORIGINALS\"):\n keyval = line.split(\"=\")\n if len(keyval) > 1 and keyval[1].strip().lower() == \"yes\":\n self.removeorig = True\n elif line.startswith(\"GNUPG_SCRIPTS_USE_UNSIGNED_KEYS\"):\n keyval = line.split(\"=\")\n if len(keyval) > 1 and keyval[1].strip().lower() == \"no\":\n self.unsigned_keys = False\n fr.close()\n except IOError:\n pass\n\n\n def write_config(self):\n \"\"\" Write back gpg.conf with current settings \"\"\"\n try:\n fw = open(self.srcconf, \"w\")\n if self.enctoself:\n fw.write(\"GNUPG_SCRIPTS_ENCRYPT_TO_SELF=yes\\n\")\n else:\n fw.write(\"GNUPG_SCRIPTS_ENCRYPT_TO_SELF=no\\n\")\n if self.pack == \"yes\":\n fw.write(\"GNUPG_SCRIPTS_PACK=yes\\n\")\n elif self.pack == \"no\":\n fw.write(\"GNUPG_SCRIPTS_PACK=no\\n\")\n else:\n fw.write(\"GNUPG_SCRIPTS_PACK=ask\\n\")\n if self.unpack == \"yes\":\n fw.write(\"GNUPG_SCRIPTS_UNPACK=yes\\n\")\n elif self.unpack == \"no\":\n fw.write(\"GNUPG_SCRIPTS_UNPACK=no\\n\")\n else:\n fw.write(\"GNUPG_SCRIPTS_UNPACK=ask\\n\")\n if self.archive:\n fw.write(\"GNUPG_SCRIPTS_ARCHIVE_FORMAT=\" + self.archive + \"\\n\")\n if self.packform:\n fw.write(\"GNUPG_SCRIPTS_ARCHIVE_NAME_FORMAT=\" +\n self.packform + \"\\n\")\n if self.removepacked:\n fw.write(\"GNUPG_SCRIPTS_REMOVE_PACKED_FILE=yes\\n\")\n else:\n fw.write(\"GNUPG_SCRIPTS_REMOVE_PACKED_FILE=no\\n\")\n if self.removeunpacked:\n fw.write(\"GNUPG_SCRIPTS_REMOVE_UNPACKED_FILE=yes\\n\")\n else:\n fw.write(\"GNUPG_SCRIPTS_REMOVE_UNPACKED_FILE=no\\n\")\n if self.removeorig:\n fw.write(\"GNUPG_SCRIPTS_REMOVE_ORIGINALS=yes\\n\")\n else:\n fw.write(\"GNUPG_SCRIPTS_REMOVE_ORIGINALS=no\\n\")\n if 
self.unsigned_keys:\n                fw.write(\"GNUPG_SCRIPTS_USE_UNSIGNED_KEYS=yes\\n\")\n            else:\n                fw.write(\"GNUPG_SCRIPTS_USE_UNSIGNED_KEYS=no\\n\")\n            fw.close()\n            return True\n        except IOError:\n            msg = _(\"Error writing script configuration file %s!\") % self.srcconf.decode('UTF-8', 'replace')\n            gpgmessages.displayMessage(msg, _(\"Error\"), parent=self.parent)\n            return False\n","repo_name":"Discreete-Linux/gnupg-scripts","sub_path":"debian/gnupg-scripts/usr/lib/python2.7/dist-packages/gpgconf.py","file_name":"gpgconf.py","file_ext":"py","file_size_in_byte":8324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71798278711","text":"import pygame\nimport Constants\nfrom Vector import Vector\nfrom Agent import Agent\nfrom enum import Enum\n\n# enum for enemy behavior\nclass EnemyBehavior(Enum):\n    SEEKING = 0\n    FOLLOWING = 1\n    FLEEING = 2\n\nclass Enemy(Agent):\n    def __init__(self, initialPosition, initialSpeed, size, color):\n        super().__init__(initialPosition, initialSpeed, size, color)\n        # behavior for enemy\n        self.behavior = EnemyBehavior.SEEKING\n        # starting color\n        self.normalColor = color\n        # invulnerability frames\n        self.iFrames = 0\n\n    def switchMode(self):\n        # changes behavior based on what it is currently\n        if self.behavior == EnemyBehavior.SEEKING or self.behavior == EnemyBehavior.FOLLOWING:\n            self.behavior = EnemyBehavior.FLEEING\n        elif self.behavior == EnemyBehavior.FLEEING:\n            self.behavior = EnemyBehavior.SEEKING\n\n    def isPlayerClose(self, player):\n        # finds the player's current position from enemy position\n        range = player.position - self.position\n        length = range.length()\n        isInRange = length < Constants.ATTACK_RANGE\n\n        if isInRange and self.behavior == EnemyBehavior.SEEKING:\n            self.behavior = EnemyBehavior.FOLLOWING\n        elif not isInRange and self.behavior == EnemyBehavior.FOLLOWING:\n            self.behavior = EnemyBehavior.SEEKING\n        return isInRange\n\n    def calcTrackingVelocity(self, player):\n        self.target = player.center\n\n    def draw(self, screen, player):\n        # calls parent, Agent\n        super().draw(screen)\n        if self.behavior == EnemyBehavior.FOLLOWING:\n            # line position\n            centerPos = (self.center.x, self.center.y)\n            targetPos = (self.target.x, self.target.y)\n            pygame.draw.line(screen, Constants.SEEKING_LINE_COLOR, centerPos, targetPos)\n\n    # seeking behavior\n    def seek(self, player):\n        # finds the player's current position from enemy position\n        movementDirection = self.target - self.position\n\n        # if player is within range, seeks\n        if self.isPlayerClose(player):\n            self.velocity = movementDirection.normalize()\n        # stops movement\n        else:\n            self.velocity = Constants.ZERO_VECTOR\n\n    # fleeing behavior\n    def flee(self, player):\n        # finds the player's current position from enemy position\n        movementDirection = self.position - self.target\n\n        # if player is within range, flees\n        if self.isPlayerClose(player):\n            self.velocity = movementDirection.normalize()\n        # stops movement\n        else:\n            self.velocity = Constants.ZERO_VECTOR\n\n    def update(self, boundx, boundy, player):\n        self.calcTrackingVelocity(player)\n\n        # checks to see if there is a collision and if there are no invulnerability frames\n        if self.isInCollision(player) and self.iFrames == 0:\n            # sets frames to constant I-Frames\n            self.iFrames = Constants.I_FRAMES\n            self.switchMode()\n\n        # depending on which behavior, calls seek or flee methods\n        if self.behavior == EnemyBehavior.SEEKING or self.behavior == EnemyBehavior.FOLLOWING:\n            self.seek(player)\n        elif self.behavior == EnemyBehavior.FLEEING:\n            self.flee(player)\n\n        # changes enemy color to indicate I-Frames\n        if self.iFrames > 0:\n            if self.iFrames % Constants.FLASHING_FRAMES == 1:\n                if self.color == self.normalColor:\n                    self.color = Constants.ENEMY_I_FRAME_COLOR\n                else:\n                    self.color = self.normalColor\n            self.iFrames -= 1\n\n        # calls parent, Agent\n        super().update(boundx, boundy)","repo_name":"btan45/GDD3400","sub_path":"GDD3400/GDD3400/Enemy.py","file_name":"Enemy.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"73003856630","text":"# What's cool about this solution is that although each call splits into three,\n# memoization rules out any duplicate calculations across the branches.\n\n# Total steps and the memoization hashtable (or in this case array).\ndef calculate_steps(total, mem_array):\n    if (total < 0): return 0\n    if (total == 0): return 1\n    # Cache and return as we go.\n    if (mem_array[total] > -1): return mem_array[total]\n    mem_array[total] = (\n        calculate_steps(total - 1, mem_array) +\n        calculate_steps(total - 2, mem_array) +\n        calculate_steps(total - 3, mem_array)\n    )\n    return mem_array[total]\n\n\ndef calculate_all_steps(total):\n    memoization_array = [-1] * (total + 1)\n    return calculate_steps(total, memoization_array)\n\n\ntotal_steps = calculate_all_steps(100)\n\nprint('total steps: ',
total_steps)","repo_name":"alpha-nero1/CrackingTheCodingInterview","sub_path":"Chapter8/Python/question_8.1.py","file_name":"question_8.1.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"28847982222","text":"import os\n\nfrom mock import MagicMock, patch\nfrom twisted.trial import unittest\nfrom twisted.web.test.requesthelper import DummyRequest\n\nfrom pixelated.resources.account_recovery_resource import AccountRecoveryResource\nfrom test.unit.resources import DummySite\n\n\nclass TestAccountRecoveryResource(unittest.TestCase):\n def setUp(self):\n self.services_factory = MagicMock()\n self.resource = AccountRecoveryResource(self.services_factory)\n self.web = DummySite(self.resource)\n\n def test_get(self):\n request = DummyRequest(['/recovery'])\n request.method = 'GET'\n d = self.web.get(request)\n\n def assert_200_when_user_logged_in(_):\n self.assertEqual(200, request.responseCode)\n self.assertIn(\"DOCTYPE html\", request.written[0])\n\n d.addCallback(assert_200_when_user_logged_in)\n return d\n","repo_name":"annacruz/pixelated-user-agent","sub_path":"service/test/unit/resources/test_account_recovery_resource.py","file_name":"test_account_recovery_resource.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"} +{"seq_id":"37120176021","text":"import json, os\nimport requests\nimport pytz\nfrom datetime import datetime\nfrom app.data import cache\n\n\n@cache.cached(timeout=50, key_prefix='fixer_parse_today')\ndef parse_today():\n fixer_access_key = os.environ.get('FIXER_ACCESS_KEY')\n url = 'http://data.fixer.io/api/latest?access_key={}&format=1&base=EUR'.format(fixer_access_key)\n data = json.loads(requests.get(url).text)\n if not data:\n return None\n date = datetime.strptime(data.get('date'), '%Y-%m-%d')\n try:\n date = pytz.timezone('Europe/Madrid').localize(date, is_dst=None)\n date = date.astimezone(pytz.timezone('America/Mexico_City'))\n except:\n return None\n\n rates = data.get('rates')\n if not rates:\n return None\n\n mxn = rates.get('MXN')\n usd = rates.get('USD')\n if not mxn or not usd:\n return None\n\n return date, (mxn / usd)\n\n\nif __name__ == '__main__':\n print(parse_today())\n","repo_name":"amielsinue/exchanges_us_to_mx","sub_path":"libs/fixer.py","file_name":"fixer.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"15939887965","text":"from PIL import Image\r\nfrom psd_tools import PSDImage\r\nimport os\r\n\r\n\r\ndef load_image_from_file(path: str, additional_types: tuple):\r\n if os.path.exists(path) and os.path.isfile(path):\r\n if path.endswith(\".psd\"):\r\n psd_image = PSDImage.open(path).composite()\r\n if not hasattr(psd_image, \"filename\"):\r\n psd_image.filename = path\r\n return psd_image\r\n if path.endswith(additional_types):\r\n return Image.open(path)\r\n return Image.new(\"RGB\", (0, 0))\r\n\r\n\r\ndef get_image(img, types: tuple | None = (\".png\", \".jpg\", \".jpeg\", \".webp\")):\r\n if type(img) == str:\r\n return load_image_from_file(img, types)\r\n elif type(img) == Image.Image:\r\n return img\r\n error = Image.new(\"RGB\", (0, 0))\r\n return 
error\r\n","repo_name":"ClaudioScappatura/WOM_Tools_Claudio","sub_path":"WOM/image_utilities.py","file_name":"image_utilities.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29211909496","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\ndice = list(map(int, input().split()))\r\n\r\nacross = {\r\n 0: 5,\r\n 1: 4,\r\n 2: 3,\r\n 3: 2,\r\n 4: 1,\r\n 5: 0\r\n}\r\n\r\nsorted_dice = sorted(dice)\r\n\r\n# 1개 면 최소값 min_1 에 저장\r\nmin_1 = sorted_dice[0]\r\n\r\n# 2개 면 최소값 min_2 에 저장\r\nif dice[across[dice.index(sorted_dice[0])]] == sorted_dice[1]:\r\n min_2 = sorted_dice[0] + sorted_dice[2]\r\nelse:\r\n min_2 = sorted_dice[0] + sorted_dice[1]\r\n\r\n# 3개 면 최소값 min_3 에 저장\r\nmin_3 = sys.maxsize\r\nfor i in range(2, 4):\r\n min_3 = min(min_3, dice[i] + dice[0] + dice[1])\r\n min_3 = min(min_3, dice[i] + dice[1] + dice[5])\r\n min_3 = min(min_3, dice[i] + dice[5] + dice[4])\r\n min_3 = min(min_3, dice[i] + dice[4] + dice[0])\r\n\r\n# 3면 쓰는 곳 개수\r\ncount_3 = 4\r\n# 2면 쓰는 곳 개수\r\ncount_2 = 4*(n-1) + 4*(n-2)\r\n# 1면 쓰는 곳 개수\r\ncount_1 = 0\r\nfor i in range(2, n):\r\n count_1 += 2*(i-1)\r\ncount_1 = count_1 * 4 + (n-2)**2\r\n\r\nif n == 1:\r\n print(sum(sorted_dice[:5]))\r\nelse:\r\n print(min_1*count_1 + min_2*count_2 + min_3*count_3)","repo_name":"SeungtaeRyu/Algorithm","sub_path":"백준/Gold/1041. 주사위/주사위.py","file_name":"주사위.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1359614523","text":"import pdb\nimport time\nfrom datetime import datetime, timedelta\nfrom pytz import timezone\nfrom odoo import tools\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass ArmourerVisitReport(models.AbstractModel):\n\t_name = 'report.sos.report_armourervisit'\n\t_description = 'Armourer Visit Report'\n\t\n\tdef get_date_formate(self,sdate):\n\t\tss = datetime.strptime(sdate,'%Y-%m-%d')\n\t\treturn ss.strftime('%d %b %Y')\n\n\t@api.model\n\tdef _get_report_values(self, docids, data=None):\n\t\tif not data.get('form'):\n\t\t\traise UserError(_(\"Form Centent is Missing.\"))\n\t\t\n\t\tdate_from = data['form']['date_from'] and data['form']['date_from'] \n\t\tdate_end= data['form']['date_end'] and data['form']['date_end']\n\t\t\n\t\trecs = self.env['sos.armourer.visit'].search([('date', '>=', date_from), ('date', '<=', date_end)], order='date')\t\n\t\treport = self.env['ir.actions.report']._get_report_from_name('sos.report_armourervisit')\n\t\t\n\t\treturn {\n\t\t\t'doc_ids': self.ids,\n\t\t\t'doc_model': report.model,\n\t\t\t'data': data['form'],\n\t\t\t'docs': self,\n\t\t\t'time': time,\n\t\t\t'Visits' :recs or False,\n\t\t\t'get_date_formate' : self.get_date_formate,\n\t\t}","repo_name":"royalline1/sos_guards","sub_path":"sos/report/armourer_visit_report.py","file_name":"armourer_visit_report.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"4673966296","text":"import argparse\n\nimport numpy as np\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn import preprocessing\nimport pickle as pickle\n\nclass ERFTrainer(object):\n def __init__(self, X, label_words):\n self.le = preprocessing.LabelEncoder()\n self.clf = ExtraTreesClassifier(n_estimators=100, max_depth=16,random_state=0)\n \n y = self.encode_labels(label_words)\n 
self.clf.fit(np.asarray(X), y)\n \n def encode_labels(self, label_words):\n self.le.fit(label_words)\n return np.array(self.le.transform(label_words), dtype=np.float32)\n \n def classify(self, X):\n label_nums = self.clf.predict(np.asarray(X))\n label_words = self.le.inverse_transform([int(x) for x in label_nums])\n return label_words\n \nif __name__=='__main__':\n\n feature_map_file = 'feature_map.pkl'\n model_file = 'erf.pkl'\n\n #Load the feature map\n with open(feature_map_file, 'rb') as f:\n feature_map = pickle.load(f)\n \n # Extract feature vectors and the labels\n label_words = [x['object_class'] for x in feature_map]\n dim_size = feature_map[0]['feature_vector'].shape[1] \n X = [np.reshape(x['feature_vector'], (dim_size,)) for x in feature_map]\n \n # Train the Extremely Random Forests classifier\n erf = ERFTrainer(X, label_words) \n if model_file:\n with open(model_file, 'wb') as f:\n pickle.dump(erf, f)\n \n \n \n \n ","repo_name":"PazLiu/ERF-attr","sub_path":"openCVattr/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"94"} +{"seq_id":"19340120593","text":"from . import Indicator\n\nimport numpy as np\n\n\nclass sar(Indicator):\n '''\n Defined by J. Welles Wilder, Jr. in 1978 in his book *\"New Concepts in\n Technical Trading Systems\"* for the RSI\n\n SAR stands for *Stop and Reverse* and the indicator was meant as a signal\n for entry (and reverse)\n\n How to select the 1st signal is left unspecified in the book. Because the\n inputs are \"high\" and \"low\", a 1-bar MinusDM is calculated, which accounts\n for both downmove and upmove of high/low. This is also done by ta-lib\n\n See:\n - https://en.wikipedia.org/wiki/Parabolic_SAR\n - http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:parabolic_sar\n '''\n group = 'momentum'\n alias = 'SAR', 'psar', 'ParabolicStopAndReverse'\n inputs = 'high', 'low'\n outputs = 'sar'\n params = (\n ('af', 0.02, 'Acceleration Factor'),\n ('afmax', 0.20, 'Maximum Acceleration Factor'),\n )\n\n def __init__(self):\n sarbuf = self.i.high(val=np.nan) # result buffer\n sar = self.i.high._apply(self._sarize, self.i.low, sarbuf, raw=True)\n self.o.sar = sar._period(1) # the 1st bar is ignored in _sarize\n\n def _sarize(self, high, low, sarbuf):\n # kick start values\n hi1, lo1 = high[0], low[0]\n hi, lo = high[1], low[1]\n\n # Calculate a minusdm of the 1st two values to set the trend\n upmove, downmove = hi - hi1, lo1 - lo\n minusdm = max(downmove, 0.0) * (downmove > upmove)\n trend = not (minusdm > 0) # initial trend, long if not downmove\n\n # use the trend to set the first ep, sar values\n ep, sar = (hi, lo1) if trend else (lo, hi1)\n\n af, AF, AFMAX = self.p.af, self.p.af, self.p.afmax # acceleration\n\n for i in range(1, len(high)): # loop over\n hi1, lo1 = hi, lo\n hi, lo = high[i], low[i]\n\n if trend:\n if lo <= sar: # trend reversal\n trend = 0\n sarbuf[i] = sar = ep # update sar and annotate\n ep, af = lo, AF # kickstart ep and af\n sar = max(sar + af * (ep - sar), hi, hi1) # new sar\n else:\n sarbuf[i] = sar # no change, annotate current sar\n if hi > ep: # if extreme breached\n ep, af = hi, min(af + AF, AFMAX) # annotate, update af\n sar = min(sar + af * (ep - sar), lo, lo1) # recalc sar\n else: # trend is 0\n if hi >= sar: # trend reversal\n trend = 1\n sarbuf[i] = sar = ep # update sar and annotate\n ep, af = hi, AF # kickstart ep and af\n sar = min(sar + af * (ep - sar), lo, lo1)\n else:\n 
sarbuf[i] = sar\n if lo < ep: # if extreme breached\n ep, af = lo, min(af + AF, AFMAX) # annotate, update af\n sar = max(sar + af * (ep - sar), hi, hi1)\n\n return sarbuf\n","repo_name":"mementum/bta-lib","sub_path":"btalib/indicators/sar.py","file_name":"sar.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":424,"dataset":"github-code","pt":"94"} +{"seq_id":"30075208636","text":"import json\nimport pdb\nimport pandas as pd\nfrom tqdm import tqdm\n\n\nfrom collections import Counter\n\ndef load_json(filename):\n with open(filename, 'r') as f:\n data = f.read()\n data = json.loads(data)\n return data\n\n\ndef aro_val_to_quads(aro, val):\n\taro, val = int(aro), int(val)\n\tif aro == 1 and val == 1:\n\t\tquad = 1\n\telif aro == 1 and val == -1:\n\t\tquad = 2\n\telif aro == -1 and val == -1:\n\t\tquad = 3\n\telif aro == -1 and val == 1:\n\t\tquad = 4\n\treturn quad\n\n\ndef main(anno, df):\n\ttags = anno.moodValue.unique()\n\tsongs = df.cdr_track_num.unique().tolist()\n\tfor s in tqdm(songs):\n\t\tcnt_quad = Counter({_: 0 for _ in range(1, 5)})\n\t\tcnt_mood = Counter({_: 0 for _ in tags})\n\t\tthis_song = anno[anno.externalID == s]\n\t\tcnt_quad.update(this_song.quadrant)\n\t\tcnt_mood.update(this_song.moodValue)\n\t\tnum_users = this_song.shape[0]\n\n\t\tdf.loc[df.cdr_track_num == s, 'num_users'] = num_users\n\t\tdf.loc[df.cdr_track_num == s, cnt_quad.keys()] = cnt_quad.values()\n\t\tdf.loc[df.cdr_track_num == s, cnt_mood.keys()] = cnt_mood.values()\n\n\t\ttxt_free = ' '.join([_ for _ in this_song.freeMood.tolist() if _])\n\t\tdf.loc[df.cdr_track_num == s, 'txt_free'] = txt_free\n\t\ttxt_arousal = ' '.join([_ for _ in this_song.arousalComment.tolist() if _]).replace(',', ' ')\n\t\ttxt_valence = ' '.join([_ for _ in this_song.valenceComment.tolist() if _]).replace(',', ' ')\n\t\ttxt_quad = txt_arousal + txt_valence\n\t\tdf.loc[df.cdr_track_num == s, 'txt_quad'] = txt_quad\n\t\ttxt_mood = ' '.join([_ for _ in this_song.moodComment.tolist() if _]).replace(',', ' ')\n\t\tdf.loc[df.cdr_track_num == s, 'txt_mood'] = txt_mood\n\n\t\tdf.loc[df.cdr_track_num == s, 'pref'] = Counter(this_song.favSong)['1']/(num_users + 0.01)\n\t\tdf.loc[df.cdr_track_num == s, 'fam'] = Counter(this_song.knownSong)['1']/(num_users + 0.01)\n\n\tdf.to_csv('./data/summary_anno.csv', sep='\\t')\n\tpdb.set_trace()\n\n\nif __name__ == \"__main__\":\n\t# fn = './data/data_24_11_2021.json'\n\tfn = './data/data_07_03_2022.json'\n\tcsv = './data/summary.csv'\n\ttags = ['joy', 'power', 'surprise', 'anger', 'tension', 'fear', 'sadness', 'bitterness', 'peace', 'tenderness', 'transcendence']\n\tdata = load_json(fn)\n\n\tanno = pd.DataFrame(data['annotations'])\n\tanno['quadrant'] = list(map(aro_val_to_quads, anno['arousalValue'].tolist(), anno['valenceValue'].tolist()))\n\n\tusers = pd.DataFrame(data['users'])\n\n\tdf = pd.read_csv(csv, sep='\\t', index_col=0)\n\n\tmain(anno, df)\n\n\n\n","repo_name":"juansgomez87/vis-mtg-mer","sub_path":"merge_research_data.py","file_name":"merge_research_data.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"94"} +{"seq_id":"34090001303","text":"import cv2\nimport numpy as np\nimport os\nfrom PIL import Image\nimport piexif\n\n\ndef png2jpg(png_path):\n if os.path.splitext(png_path)[-1] != '.png':\n print(f\"\\t> {png_path} is not a png file, skip\")\n return\n img = Image.open(png_path)\n if len(img.split()) == 4:\n\n white = Image.new(\"RGBA\", 
img.size, color=(255, 255, 255, 255))\n img = Image.alpha_composite(white, img)\n\n R, G, B, A = img.split()\n img = Image.merge(\"RGB\", (R, G, B)).convert('RGB')\n else:\n img = img.convert('RGB')\n jpg_path = os.path.splitext(png_path)[0] + '.jpg'\n img.save(jpg_path, quality=100)\n os.remove(png_path)\n print(\"\\t> changed {} to {}\".format(os.path.basename(png_path),\n os.path.basename(jpg_path)))\n return jpg_path\n\n\ndef checkImage(img_path):\n try:\n pil_im = Image.open(img_path)\n pil_im.load()\n except:\n print(\"\\t> cannot open {}, deleting...\".format(img_path))\n # os.remove(img_path)\n return\n\n try:\n piexif.remove(img_path)\n except:\n print(\"\\t> cannot remove EXIF info of {}, deleting...\".format(img_path))\n # os.remove(img_path)\n # os.system(\"mv '{}' '/home/lzh/Desktop/temp'\".format(img_path))\n return\n\n saving_flag = False\n if pil_im.mode != \"RGB\":\n pil_im = pil_im.convert(\"RGB\")\n saving_flag = True\n print(\"\\t> convert {} to RGB\".format(img_path))\n\n im = np.array(pil_im)\n if len(im.shape) < 2:\n im = im[0]\n pil_im = Image.fromarray(im)\n pil_im.save(os.path.splitext(img_path)[0] + '.jpg', quality=100)\n print(\"\\t> too few channels: {}\".format(img_path))\n elif len(im.shape) == 3 and im.shape[2] >= 4:\n im = im[:, :, :3]\n pil_im = Image.fromarray(im)\n pil_im.save(os.path.splitext(img_path)[0] + '.jpg', quality=100)\n print(\"\\t> too many channels: {}\".format(img_path))\n else:\n if saving_flag:\n pil_im.save(os.path.splitext(img_path)[0] + '.jpg', quality=100)\n\n\ndef renameImage(class_name, class_path, img_name, index):\n name = f\"{class_name}_{index}\" + os.path.splitext(img_path)[-1]\n os.rename(os.path.join(class_path, img_name), os.path.join(class_path, name))\n\n\nif __name__ == '__main__':\n pic_root = './dish/data'\n classes = [c for c in os.listdir(pic_root) if os.path.isdir(os.path.join(pic_root, c))]\n\n for class_name in classes:\n class_path = os.path.join(pic_root, class_name)\n\n for i, img_name in enumerate(os.listdir(class_path)):\n img_path = os.path.join(class_path, img_name)\n # png2jpg(img_path)\n checkImage(img_path)\n renameImage(class_name, class_path, img_name, i)\n\n","repo_name":"lntzm/SmartCanteen","sub_path":"dish/picPreprocess.py","file_name":"picPreprocess.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"31458073026","text":"import torch\nfrom .BasicConv import BasicConv\nfrom .ResNet18 import ResNet18\nimport torch.nn as nn\nimport torch.optim as optim\nimport time\nimport numpy as np\nfrom tqdm import tqdm\nimport os\nfrom Metrics.metrics import print_metric\nfrom DataManager.SyntheticCharacterDataset import SyntheticCharacterDataset\nimport torch.nn.functional as F\nimport random\nfrom tqdm import tqdm\n\nclass Teacher():\n\n def __init__(self, teacher_type, n_classes):\n self.teacher_type = teacher_type\n self.n_classes = n_classes\n self.prediction_dict = None\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n if teacher_type == 'BasicConv':\n self.model = BasicConv(n_classes)\n elif teacher_type == 'ResNet18':\n self.model = ResNet18(n_classes)\n else:\n raise ValueError('Teacher type not supported')\n\n self.model.to(self.device)\n\n def train(self, train_loader, val_loader, save_dir, n_epochs=10, lr=0.001, verbose_freq=1):\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(self.model.parameters(), lr=lr)\n \n start_time = time.time()\n self.metrics = 
{'freq': verbose_freq, 'train_loss': [], 'train_acc': [], \n 'val_loss': [], 'val_acc': [], 'best_val_acc': 0, 'best_epoch': 0}\n self.save_dir = save_dir\n\n self.model.train()\n for epoch in range(1, n_epochs+1):\n running_loss = 0.0\n print(f\"Epoch {epoch}/{n_epochs}:\")\n for images, labels in tqdm(train_loader):\n images, labels = images.to(self.device), labels.to(self.device)\n\n optimizer.zero_grad()\n logits = self.model(images)\n loss = criterion(logits, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n \n if epoch % verbose_freq == 0:\n self.print_metrics(train_loader, val_loader, epoch, start_time, running_loss)\n\n print(\"Training finished.\")\n\n def print_metrics(self, train_loader, val_loader, epoch, start_time, running_loss):\n epoch_loss = running_loss / len(train_loader)\n train_acc = self.get_accuracy(train_loader)\n val_acc = self.get_accuracy(val_loader)\n\n print(\"_\"*75)\n print_metric('Training Loss', 100 * epoch_loss, 2)\n print_metric('Training accuracy', 100 * train_acc, 2)\n print_metric('Validation accuracy', 100 * val_acc, 2)\n print_metric('Time elapsed (seconds)', round(time.time() - start_time), 0)\n print(\"_\"*75)\n\n self.metrics['train_loss'].append(epoch_loss)\n self.metrics['val_loss'].append(epoch_loss)\n self.metrics['train_acc'].append(train_acc)\n self.metrics['val_acc'].append(val_acc)\n\n if val_acc > self.metrics['best_val_acc']:\n prev = self.save_dir + f\"/teacher_{self.teacher_type}_{str(self.metrics['best_epoch']).zfill(3)}.pt\"\n if os.path.exists(prev):\n os.remove(prev)\n self.metrics['best_val_acc'] = val_acc\n self.metrics['best_epoch'] = epoch\n self.save_model(self.save_dir + f\"/teacher_{self.teacher_type}_{str(self.metrics['best_epoch']).zfill(3)}.pt\")\n\n def get_accuracy(self, test_loader):\n correct = 0\n total = 0\n with torch.no_grad():\n for images, labels in test_loader:\n images, labels = images.to(self.device), labels.to(self.device)\n outputs = self.model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n accuracy = correct / total\n return accuracy\n\n def predict_img(self, img_path, t=1, target=-1):\n self.model.eval()\n img = SyntheticCharacterDataset.load_character_image(img_path)\n img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2]) / 255.0\n img = torch.from_numpy(img).float().to(self.device)\n \n with torch.no_grad():\n logits = self.model(img)\n probs = F.softmax(logits/t, dim=1) if target != 0 else F.log_softmax(logits/t, dim=1)\n pred = probs.data.cpu().argmax().item()\n\n return pred, probs\n\n def generate_prediction_dict(self, saved_path, img_dir, t):\n self.load_model(saved_path)\n self.prediction_dict = {}\n all_imgs = os.listdir(img_dir)\n \n for class_no in range(1, self.n_classes):\n class_str = str(class_no).zfill(3)\n class_imgs = [img for img in all_imgs if img.startswith(class_str)]\n\n while True:\n rand_img = class_imgs[random.randint(0, len(class_imgs)-1)]\n img_path = os.path.join(img_dir, rand_img)\n pred, probs = self.predict_img(img_path, t=t)\n\n if pred == class_no:\n break\n\n self.prediction_dict[class_str]= probs\n\n def get_stacked_probs(self, labels):\n assert self.prediction_dict is not None, \"Teacher's Prediction dictionary is not initialized\"\n \n batch_probs = torch.zeros((len(labels), 31, self.n_classes))\n for label_no, label in enumerate(labels):\n label = list(filter(lambda grapheme_id: grapheme_id != 0, label))\n label = [grapheme_id.item() for 
grapheme_id in label]\n \n probs = torch.zeros((31, self.n_classes))\n probs_no = 0\n for grapheme_no, grapheme_id in enumerate(label, 1):\n for _ in range(31//len(label)):\n probs[probs_no, :] = self.prediction_dict[str(grapheme_id).zfill(3)]\n probs_no += 1\n\n if len(label) == grapheme_no:\n while probs_no <= 30:\n probs[probs_no, :] = self.prediction_dict[str(grapheme_id).zfill(3)]\n probs_no += 1\n\n batch_probs[label_no, :, :] = probs\n \n batch_probs = batch_probs.transpose(0,1)\n return batch_probs\n\n def save_model(self, path):\n torch.save(self.model.state_dict(), path)\n\n def load_model(self, path):\n self.model.load_state_dict(torch.load(path))","repo_name":"NazmulTakbir/Digitization-of-Bangla-Handwritten-Text","sub_path":"Models/Teacher/Teacher.py","file_name":"Teacher.py","file_ext":"py","file_size_in_byte":6303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"940819320","text":"from argparse import ArgumentParser, ArgumentTypeError\n\nfrom gcMapExplorer.lib.hic2gcmap import hic2gcmap\nfrom gcMapExplorer.lib.hicparser import HicFileType, NormType\n\n\ndef resolution(string):\n if string == \"finest\":\n return string\n\n if string.endswith(\"kb\"):\n string = string[:-2] + \"000\"\n\n try:\n value = int(string)\n except ValueError:\n raise ArgumentTypeError(\"Resolution must be a positive integer or integer ending with 'kb'\")\n else:\n if value <= 0:\n raise ArgumentTypeError(\"Resolution must be a positive integer\")\n return value\n\n\ndef norm(string):\n if string == \"none\":\n return None\n return NormType(string)\n\n\ndef main():\n from os import path\n import sys\n\n parser = ArgumentParser(prog=\"gcMapExplorer hic2gcmap\", description=\"Convert hic files to gcmap\",\n allow_abbrev=False)\n parser.add_argument(\"input\", type=HicFileType(), help=\"hic input file\")\n parser.add_argument(\"output\", type=str, nargs=\"?\", help=\"output file or directory\", default=\".\")\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-c\", \"--chromosomes\", type=str, nargs=2, metavar=(\"A\", \"B\"), help=\"a pair of chromosomes A B\")\n group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"list all available chromosomes\")\n parser.add_argument(\"--compression\", type=str, choices=(\"lzf\", \"gzip\"), default=\"lzf\", metavar=\"C\",\n help=\"compression type, choose between lzf, gzip (default: lzf)\")\n parser.add_argument(\"-r\", \"--resolution\", type=resolution, default=\"finest\", metavar=\"R\",\n help=\"the resolution, as an integer or as kb (default: finest)\")\n parser.add_argument(\"-n\", \"--norm\", type=str, choices=(\"VC\", \"VC_SQRT\", \"KR\", \"none\"), metavar=\"N\",\n default=\"none\",\n help=\"the type of norm to use, choose between VC, VC_SQRT, KR, none (default: none)\")\n parser.add_argument(\"--downsampling\", type=str, choices=(\"sum\", \"mean\", \"max\", \"none\"), default=\"sum\", metavar=\"D\",\n help=\"the downsampling method to use, choose between sum, mean, max, none (default: sum)\")\n\n args = parser.parse_args(args=sys.argv[sys.argv.index(\"hic2gcmap\") + 1:])\n\n hic = args.input\n\n if args.list:\n print(\", \".join(hic.chromosomes))\n return\n\n if path.isdir(args.output):\n filename = path.splitext(path.basename(args.input.name))[0]\n output_file = path.join(args.output, filename)\n if args.chromosomes:\n output_file += \"_\" + \"_\".join(args.chromosomes)\n if args.downsampling == \"none\" and args.resolution != \"finest\":\n output_file += \"_\" + 
str(args.resolution)[:-3] + \"kb\"\n if args.norm != \"none\":\n output_file += \"_\" + args.norm\n output_file += \"_\" + args.compression\n output_file += \".gcmap\"\n else:\n output_file = args.output\n\n if args.chromosomes:\n chr1, chr2 = args.chromosomes\n try:\n hic2gcmap(hic, chr1, chr2, output_file, resolution=args.resolution, norm_type=norm(args.norm),\n compression=args.compression,\n downsampling=args.downsampling)\n except Exception as e:\n print(\"Error: {}\".format(e))\n else: # all chromosomes\n chromosome_names = filter(lambda pair: \"All\" not in pair, (hic.chromosome_names(r) for r in hic.records))\n for chr1, chr2 in chromosome_names:\n try:\n hic2gcmap(hic, chr1, chr2, output_file, resolution=args.resolution, norm_type=norm(args.norm),\n compression=args.compression,\n downsampling=args.downsampling)\n except Exception as e:\n print(\"Error: {}\".format(e))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rjdkmr/gcMapExplorer","sub_path":"gcMapExplorer/clui/hic2gcmap.py","file_name":"hic2gcmap.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"94"} +{"seq_id":"8174667736","text":"from cso_classifier import CSOClassifier\n\nfrom utils.article import Article\n\n\nclass CSOClassification:\n def __init__(self):\n self.classifier = CSOClassifier(\n workers=1, modules=\"both\", enhancement=\"first\", explanation=True\n )\n\n def classify_search_result(self, search_result):\n # input format\n \"\"\"\n papers = { \"id1\": { \"title\": '...', \"abstract\": '...', \"keywords\": ['...',]},\n \"id2\": { \"title\": '...', \"abstract\": '...', \"keywords\": ['...',]}}\n \"\"\"\n papers = {}\n for article in search_result:\n papers[article.id] = {\n \"title\": article.title,\n \"abstract\": article.abstract,\n \"keywords\": article.keywords_rest,\n }\n\n concepts = self.classifier.batch_run(papers)\n\n # output format\n \"\"\"\n {\"id1\": {\"syntactic\": [...], \"semantic\": [...], \"union\": [...], \"enhanced\": [...], \"explanation\": {...}},\n \"id2\": {\"syntactic\": [...], \"semantic\": [...], \"union\": [...], \"enhanced\": [...], \"explanation\": {...}}}\n \"\"\"\n\n for article in search_result:\n article.CSO_keywords = concepts[article.id][\"union\"]\n\n return search_result\n\n def classify_single_paper(self, paper: Article):\n \"\"\"Runs CSO classifier on a single article.\"\"\"\n paper.CSO_keywords = self.classifier.run(\n {\n \"title\": paper.title,\n \"abstract\": paper.abstract,\n \"keywords\": paper.keywords_snippet,\n }\n )[\"union\"]\n\n return paper\n","repo_name":"ProjectDossier/cruise-screening","sub_path":"src/cruise_literature/concept_search/concept_classification.py","file_name":"concept_classification.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"94"} +{"seq_id":"8448541936","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n #Sites\n path('<int:pk>/', DetailActivite.as_view()),\n path('', ListActivite.as_view()),\n path('create', CreateActivite.as_view()),\n path('delete/<int:pk>', DeleteActivite.as_view()),\n\n # Commentaire\n path('commentaire/<int:pk>/', DetailCommentaire.as_view()),\n path('commentaire', ListCommentaire.as_view()),\n path('commentaire/create', CreateCommentaire.as_view()),\n path('commentaire/delete/<int:pk>', DeleteCommentaire.as_view()),\n # Favoris\n path('favoris/<int:pk>/', DetailFavorisActivite.as_view()),\n 
path('favoris', ListFavorisActivite.as_view()),\n path('favoris/create', CreateFavorisActivite.as_view()),\n path('favoris/delete/<int:pk>', DeleteFavorisActivite.as_view()),\n]","repo_name":"Djouko/Tradis_Backend","sub_path":"Activite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"24797867106","text":"import time\nimport urllib\nimport base64url\n\nfrom flask import Flask, request\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef search():\n if 'q' not in request.args:\n return '', 400\n q = request.args.get('q', type=str)\n q = base64url.base64_decode(q)\n return jsed_html(q), 200\n\n\ndef getWebDriver():\n options = Options()\n options.add_argument('--headless')\n options.add_argument('--no-sandbox')\n return webdriver.Chrome(options=options)\n\n\ndriver = getWebDriver()\ndef jsed_html(url):\n \"\"\"\n urlからjavascriptを実行したhtmlを返す\n \"\"\"\n print(url)\n driver.get(url)\n time.sleep(0.8)\n html = driver.page_source\n return html\n","repo_name":"slimemoss/slimemoss.github.io","sub_path":"tool/gencarddb/curljs/app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23136899579","text":"from pickle import TRUE\nimport sys\nimport glob\nimport os\nimport json\nimport string\nfrom pprint import pprint \nimport math\n\ndef writeModel(tagDict, tagIndexMap, vocabDict, initProbMatrix, emissionMatrix, transitionMatrix, tagVsWordCounts):\n #print(\"writing to file: hmmmodel.txt\")\n file = open(\"hmmmodel.txt\", 'w', encoding='utf-8')\n \n model = {}\n model[\"tagDict\"] = tagDict #tag vs (index, count)\n model[\"tagIndexMap\"] = tagIndexMap #index vs tag\n model[\"vocabDict\"] = vocabDict #word vs (index, count)\n model['initProbMatrix'] = initProbMatrix # index is tag index\n model[\"emissionMatrix\"] = emissionMatrix #rows: tags, columns: words\n model[\"transitionMatrix\"] = transitionMatrix #rows: prev tag ti-1, column: ti\n model[\"tagVsWordCounts\"] = tagVsWordCounts\n\n json.dump(model, file, indent=4)\n\ndef hmmLearn(filepath):\n\n taggedSentences = open(filepath, \"r\", encoding = 'utf-8').readlines()\n tagDict, tagIndexMap, vocabDict = getDicts(taggedSentences)\n #print(f\"learn: tag size: {len(tagDict)}, tagIndex size: {len(tagIndexMap)}, word size: {len(vocabDict)}\")\n \n initProbMatrix, emissionMatrix, transitionMatrix, tagVsWordCounts = calcMatrix(tagDict, tagIndexMap, vocabDict, taggedSentences)\n #print(f\"emissionMatrix size: {len(emissionMatrix)}, transitionMatrix size: {len(transitionMatrix)}\")\n\n writeModel(tagDict, tagIndexMap, vocabDict, initProbMatrix, emissionMatrix, transitionMatrix, tagVsWordCounts)\n\ndef getDicts(taggedSentences):\n tagDict = {}\n vocabDict = {}\n tagIndexMap = {}\n tagVsWordSet = {}\n tagIndex = 0\n vocabIndex = 0\n for sentence in taggedSentences:\n taggedWords = sentence.split()\n prevTag = '_START_'\n for taggedWord in taggedWords:\n word, tag = split(taggedWord)\n\n if(tagDict.get(tag) is None):\n tagDict[tag] = (tagIndex, 0)\n tagIndexMap[tagIndex] = tag\n tagIndex += 1\n tagVsWordSet[tag] = set()\n tagDict[tag] = (tagDict[tag][0], tagDict[tag][1] + 1)\n tagVsWordSet[tag].add(word)\n if(vocabDict.get(word) is None):\n vocabDict[word] = (vocabIndex, 0)\n vocabIndex += 1\n 
vocabDict[word] = (vocabDict[word][0], vocabDict[word][1] + 1)\n\n #print(f\"tag vs word: {len(tagVsWordSet)}\")\n return tagDict, tagIndexMap, vocabDict\n\ndef calcMatrix(tagDict, tagIndexMap, vocabDict, taggedSentences):\n emissionMatrix = [[0]* len(vocabDict) for _ in range(len(tagDict))]\n transitionMatrix = [[1] * len(tagDict) for _ in range(len(tagDict))]\n initProbMatrix = [1] * len(tagDict) #todo\n\n for taggedSentence in taggedSentences:\n taggedWords = taggedSentence.split()\n prevTag = '_START_'\n for taggedWord in taggedWords:\n word, tag = split(taggedWord)\n \n if(tagDict.get(tag) is None): continue #todo\n\n emissionMatrix[tagDict[tag][0]][vocabDict[word][0]] += 1\n\n if prevTag != '_START_':\n transitionMatrix[tagDict[prevTag][0]][tagDict[tag][0]] += 1\n else:\n initProbMatrix[tagDict[tag][0]] += 1\n prevTag = tag\n\n tagVsWordCounts = getTagVsWordCount(emissionMatrix)\n\n for i in range(len(initProbMatrix)):\n initProbMatrix[i] = initProbMatrix[i]/(len(taggedSentences) + len(tagDict)) #tagDict[tagIndexMap[i]][1] #(len(taggedSentences) + len(tagDict))\n\n for i in range(len(emissionMatrix)):\n for j in range(len(emissionMatrix[0])):\n emissionMatrix[i][j] = emissionMatrix[i][j]/(tagDict[tagIndexMap[i]][1])\n\n for i in range(len(transitionMatrix)):\n for j in range(len(transitionMatrix[0])):\n transitionMatrix[i][j] = transitionMatrix[i][j]/(tagDict[tagIndexMap[i]][1] + len(tagDict))\n # print(\"init\")\n # print(initProbMatrix)\n # print(\"emission\")\n # print(emissionMatrix)\n # print(\"transition\")\n # print(transitionMatrix)\n return initProbMatrix, emissionMatrix, transitionMatrix, tagVsWordCounts\n\ndef getTagVsWordCount(emissionMatrix):\n tagVsWordCounts = {}\n for i in range(len(emissionMatrix)):\n count = 0\n uniqCount = 0\n for j in range(len(emissionMatrix[0])):\n if(emissionMatrix[i][j] == 1):\n uniqCount += 1\n count += emissionMatrix[i][j]\n tagVsWordCounts[i] = (count, uniqCount)\n \n #pprint(tagVsWordCounts)\n return tagVsWordCounts\n\ndef split(wordAndTag):\n split = wordAndTag.rsplit('/', 1)\n word = split[0]\n tag = split[1]\n return word, tag\n\nif __name__ == \"__main__\" :\n\n hmmLearn(sys.argv[1])\n #Write model","repo_name":"sowmyavoona96/csci544","sub_path":"Assignment_2_HMM/hmmlearn.py","file_name":"hmmlearn.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"27288763933","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 28 16:10:12 2022\r\n\r\n@author: Shachi Mishra\r\n\"\"\"\r\n\r\n \r\n# list of numbers\r\nlist2 = [12,14,-95,3] \r\n\r\n# iterating each number in list\r\nfor num in list2:\r\n \r\n # checking condition\r\n if num >= 0:\r\n print(num, end = \" \")\r\n \r\n# Function for nth Fibonacci number\r\ndef Fibonacci(n):\r\n \r\n # Check if input is 0 then it will\r\n # print incorrect input\r\n if n < 0:\r\n print(\"Incorrect input\")\r\n ","repo_name":"ShachiMishra/MyCaptainAssignment","sub_path":"task 2-1.py","file_name":"task 2-1.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"3508394678","text":"import sys, os, time, errno\nimport numpy as np\nimport h5py\nimport kimimaro\nfrom scipy import interpolate, spatial\nimport collections\nimport types\n\nfrom skimage import morphology\nimport trimesh\n\nfrom annotator.Annotator._get_radius_ray import _get_radius_ray\n\nclass GenerateSkeleton:\n def __init__( self, 
ids_volume, pitch, skeletons_path, surfaces_path):\n self.xpitch = pitch[0]\n self.ypitch = pitch[1]\n self.zpitch = pitch[2]\n self.ids_volume = ids_volume\n self.skeletons_path = skeletons_path\n self.surfaces_path = surfaces_path\n\n def set_params( self, scale, constant, min_voxel, max_path, smooth, extra_after):\n self.scale = scale\n self.constant = constant\n self.min_voxel = min_voxel\n self.max_path = max_path\n self.smooth = smooth\n self.extra_after = extra_after\n\n def exec( self, id, markerlocs ):\n\n # mask = (self.ids_volume == id)\n\n\t##\n\t## Markerpoint location correction.\n\t##\n xmax, ymax, zmax = self.ids_volume.shape\n\n markerlocs_int_source = [(int(loc[0]/self.xpitch), int(loc[1]/self.ypitch), int(loc[2]/self.zpitch)) for loc in markerlocs]\n markerlocs_int = []\n\n jmax = 200\n pad_width = ((1,1),(1,1),(1,1))\n #print(\"markerlocs_int_source: \", markerlocs_int_source)\n if markerlocs_int_source != []:\n \tprint(\"Targ ID: \", id)\n \tfor i, oloc in enumerate(markerlocs_int_source):\n \t\tif self.ids_volume[oloc[0], oloc[1], oloc[2]] == id:\n \t\t\tmarkerlocs_int.append( oloc )\n \t\t\tcontinue\n \t\telse:\n \t\t\tj = 0\n \t\t\twhile (j < jmax):\n \t\t\t\touter = morphology.ball(j+1, dtype='bool')\n \t\t\t\tinner = np.pad( morphology.ball(j, dtype='bool') , pad_width)\n \t\t\t\tball = np.where(outer ^ inner)\n \t\t\t\tj += 1\n \t\t\t\tlocs = [(ix+oloc[0],iy+oloc[1],iz+oloc[2]) for ix,iy,iz in zip(ball[0], ball[1], ball[2])]\n \t\t\t\t#print('j = ', j)\n \t\t\t\tfor l in locs:\n \t\t\t\t\tif (l[0] < 0) or (l[1] < 0) or (l[2] < 0) or (l[0]>=xmax) or (l[1]>=ymax) or (l[2]>=zmax):\n \t\t\t\t\t\tcontinue\n \t\t\t\t\tif self.ids_volume[l[0], l[1], l[2]] == id:\n \t\t\t\t\t\tmarkerlocs_int.append( l )\n \t\t\t\t\t\tj = jmax\n \t\t\t\t\t\tbreak\n\n print(\"markerlocs_int: \", markerlocs_int)\n\n ##\n\t## Skeletonaization\n\t##\n\n print(\"Kimimaro initialization...\")\n\n if (self.max_path == 2) and (len(markerlocs_int) == 2):\n \tskel = self._unbranched_centerline( id, markerlocs_int )\n else :\n \tskel = self._skeletonize( id, markerlocs_int )\n\n if isinstance(skel, type(None)) :\n \tprint('No skeleton. ')\n \treturn False\n\n vertices = skel.vertices\n edges = skel.edges\n\n\n if vertices.shape[0] < 4:\n \tprint('No skeleton. ')\n \treturn False\n if edges.shape[0] < 4:\n \tprint('No skeleton. 
')\n \treturn False\n\n\n\t##\n\t## Smoothing\n\t##\n new_vertices, new_edges, new_lengths, new_tangents = self._smoothing(vertices, edges)\n\n if new_vertices.shape[0] < 4:\n \tprint('No skeleton: ', id)\n \treturn False\n if new_edges.shape[0] < 4:\n \tprint('No skeleton: ', id)\n \treturn False\n\n ##\n\t## Calculate radiuses for each vartices (k-nearst neighbor)\n\t##\n# surface_vertices = self._load_surface_vertices(id)\n# tree = spatial.cKDTree(surface_vertices)\n# dists, indexes = tree.query(new_vertices, k=5)\n# new_radiuses\t = np.mean(dists, axis=1)\n# print('new_radiuses: ', new_radiuses.shape)\n\n ##\n\t## Calculate radiuses for each vartices (raycaster)\n\t##\n mesh = self._load_surface_mesh(id)\n new_radiuses\t = _get_radius_ray(new_vertices, new_tangents, mesh)\n print('new_radiuses: ', new_radiuses.shape)\n\n\t##\n\t## Save skeleton\n\t##\n skeleton_ids = self._save_skeleton_file(id, new_vertices, new_edges, new_radiuses, new_lengths, new_tangents)\n\n\n\n\t##\n\t## Smoothing\n\t##\n\n def _smoothing(self, vertices, edges):\n\n\t# Obtain crossing edges\n edges_ids = edges.flatten().tolist()\n edges_num_connect = collections.Counter(edges_ids)\n edges_cross = [k for k, v in edges_num_connect.items() if v > 2]\n\n start_pairs = []\n neighbors_cross = []\n\n\t# Obtain the pairs of [crossing edges, neighboring edges]\n\n if edges_cross == []:\n \tedges_ends = [k for k, v in edges_num_connect.items() if v == 1]\n \tpartner = edges[np.any(edges == edges_ends[0], axis=1),1][0]\n \tstart_pair = [edges_ends[0], partner]\n \tsegment = self._obtain_segment(start_pair, edges)\n# \tprint('segment: ', segment)\n \treturn self._exec_smoothing(vertices, [segment])\n\n for id_cross in edges_cross:\n \ttmp = edges[np.any(edges == id_cross, axis=1),:].flatten()\n \t_neighbors_cross = tmp[tmp != id_cross]\n# \tprint('id_cross: ', id_cross, ', neighbors_cross: ', _neighbors_cross)\n \tpairs = [[id_cross, neighbor] for neighbor in _neighbors_cross]\n \tneighbors_cross.extend( _neighbors_cross.tolist() )\n \tstart_pairs.extend( pairs )\n# print('start_pairs: ',start_pairs)\n\n flag_used = np.zeros(len(neighbors_cross))\n neighbors_cross = np.array(neighbors_cross)\n\n\t# Track each segment\n segments = []\n new_segments = []\n for i, pair in enumerate(start_pairs):\n \tif flag_used[i] == 0:\n \t\tsegment = self._obtain_segment(pair, edges)\n \t\tsegments.append( segment )\n \t\tflag_used += (neighbors_cross == segment[-2])\n\n # print('segments', segments)\n return self._exec_smoothing(vertices, segments)\n\n\n def _exec_smoothing(self, vertices, segments):\n\n num_pts = 200\n large_value = 100000000\n\n\t# Set new coordinate\n vertices_list = vertices.tolist()\n new_vertices = []\n new_lengths = []\n new_tangents = []\n new_edges\t = []\n\n\t# Smoothing and mapping of segments\n for segment in segments:\n\t\n\t\t## Smoothing\n \tif len(segment) < 4:\n \t\t# print('segment: ', segment)\n \t\tcontinue\n \tx = vertices[segment,0]\n \ty = vertices[segment,1]\n \tz = vertices[segment,2]\n \tw = np.ones(x.shape[0])*10\n \tw[0] = large_value\n \tw[-1] = large_value\n \ttck, u = interpolate.splprep([x,y,z], s=self.smooth, w=w ) # s = 50\n \tu_fine = np.linspace(0,1,num_pts)\n \tx_fit, y_fit, z_fit = interpolate.splev(u_fine, tck)\n \t\n \t## Segmental lengths\n \tx_diff = np.diff(x_fit)\n \ty_diff = np.diff(y_fit)\n \tz_diff = np.diff(z_fit)\n \ttmp_len = np.sqrt(x_diff*x_diff + y_diff*y_diff + z_diff*z_diff)\n \ttmp_lengths = np.append(tmp_len, 0)/2 + np.append(0,tmp_len)/2\n \ttmp_lengths = 
tmp_lengths.tolist()\n \t# print('tmp_lengths: ', tmp_lengths)\n \t\n \t## Segmental tangents\n \ttmp_tangents = np.stack((x_diff, y_diff, z_diff))\n \ttmp_tangents /= tmp_len\n \ttmp_tangents = tmp_tangents.T\n \ttmp_tangents = np.vstack( [tmp_tangents, tmp_tangents[-1,:].reshape(1,3)] )\n\n\t\t## Mapping\n \ttmp_verts = [[ix,iy,iz] for ix,iy,iz in zip(x_fit, y_fit, z_fit) ]\n \tids_start_new = len(new_vertices)\n \tnew_vertices.extend(tmp_verts)\n \tnew_lengths.extend(tmp_lengths)\n \tnew_tangents.extend(tmp_tangents)\n \tids_end_new = len(new_vertices)\n \ttmp_edges = [[i, i+1] for i in range(ids_start_new,ids_end_new-1)]\n \tnew_edges.extend(tmp_edges)\n\t\n new_vertices = np.array(new_vertices)\n new_edges\t\t = np.array(new_edges)\n new_lengths \t = np.array(new_lengths)\n new_tangents\t = np.array(new_tangents)\n print('Vertices: ', new_vertices.shape)\n print('Edges : ', new_edges.shape)\n print('Lengths : ', new_lengths.shape)\n print('Tangents: ', new_tangents.shape)\n print('Total length : ', np.sum(new_lengths))\n\n return new_vertices, new_edges, new_lengths, new_tangents\n\n\n def _load_surface_vertices(self, id):\n print('Loading surface, ID: ', id)\n filename = self.surfaces_path + os.sep + str(id).zfill(10)+'.stl'\n mesh = trimesh.load(filename)\n vertices = mesh.vertices\n print('')\n return vertices\n\n def _load_surface_mesh(self, id):\n print('Loading surface, ID: ', id)\n filename = self.surfaces_path + os.sep + str(id).zfill(10)+'.stl'\n mesh = trimesh.load(filename)\n print('')\n return mesh\n\n\n def _save_skeleton_file(self, id, vertices, edges, radiuses, lengths, tangents):\n filename = self.skeletons_path + os.sep + str(id).zfill(10)+'.hdf5'\n with h5py.File( filename ,'w') as f:\n \tf.create_dataset('vertices', data=vertices)\n \tf.create_dataset('edges' , data=edges)\n \tf.create_dataset('radiuses', data=radiuses)\n \tf.create_dataset('lengths' , data=lengths)\n \tf.create_dataset('tangents', data=tangents)\n print('Generated skeleton ID: ', id)\n print('')\n return True\n\n\n def _obtain_segment(self, start_pair, edges):\n output_edges = start_pair # list\n while True:\n \ttmp = edges[np.any(edges == output_edges[-1], axis=1),:]\n \ttmp = tmp[np.all(tmp != output_edges[-2], axis=1),:]\n \tif (tmp.shape[0] == 1):\n \t\tnewid = tmp[tmp != output_edges[-1]].tolist()\n \t\toutput_edges.extend(newid)\n \telse:\n \t\treturn output_edges\n\n\n def _skeletonize( self, id, markerlocs_int):\n\n if self.extra_after == True:\n \textra_targets_after = markerlocs_int\n \textra_targets_before = []\n else :\n \textra_targets_after = []\n \textra_targets_before = markerlocs_int\n\n teasar_params={\\\n\t\t'scale': self.scale,\n\t\t'const': self.constant, # physical units default 500\n\t\t'pdrf_exponent': 4,\n\t\t'pdrf_scale': 100000,\n\t\t'soma_detection_threshold': 1100, # physical units\n\t\t'soma_acceptance_threshold': 3500, # physical units\n\t\t'soma_invalidation_scale': 1.0,\n\t\t'soma_invalidation_const': 300, # physical units\n\t\t'max_paths': self.max_path}\n\n skels = kimimaro.skeletonize(\n\t self.ids_volume, \n\t teasar_params=teasar_params,\n\t object_ids=[ id ], # process only the specified labels\n\t #extra_targets_before=markerlocs_int, # target points in voxels\n\t extra_targets_after=extra_targets_after, # target points in voxels\n\t extra_targets_before=extra_targets_before, # target points in voxels\n\t dust_threshold=self.min_voxel, # skip connected components with fewer than this many voxels\n\t anisotropy=( self.xpitch, self.ypitch, self.zpitch ), # 
default True\n\t fix_branching=True, # default True\n\t fix_borders=True, # default True\n\t progress=True, # default False, show progress bar\n\t parallel=1, # <= 0 all cpu, 1 single process, 2+ multiprocess\n\t parallel_chunk_size=100, # how many skeletons to process before updating progress bar\n )\n\n print(\"skels: \", skels)\n skel = skels[id]\n return skel\n\n\n def _unbranched_centerline( self, id, markerlocs_int):\n\n print('Unbranched line between markerpoints.')\n skel = kimimaro.connect_points(\n\t labels = (self.ids_volume == id),\n start = markerlocs_int[0],\n end = markerlocs_int[1],\n anisotropy = ( self.xpitch, self.ypitch, self.zpitch ),\n pdrf_scale=100000, \n pdrf_exponent=4\n )\n return skel\n\n","repo_name":"urakubo/UNI-EM_annotator","sub_path":"annotator/Annotator/GenerateSkeleton.py","file_name":"GenerateSkeleton.py","file_ext":"py","file_size_in_byte":10649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72400079989","text":"from dataset import Dataset\r\nfrom dataloader.dataloader import Dataloader\r\nfrom models.vistanet import VistaNet\r\nfrom models.reducenet import ReduceNet\r\nimport json,math,tqdm\r\nfrom trainer.supervised_trainer import SupervisedTrainer\r\nfrom evaluator import Evaluator\r\nimport logging\r\nimport warnings\r\nimport os,torch\r\nfrom models.model_img_side import VggNet\r\nfrom models.model_text_side import TextNet\r\n \r\nwarnings.filterwarnings(\"ignore\")\r\n\r\ndef init_logger(config):\r\n\r\n logfilepath =config['log_file'] if config['log_file'] else config['log_path']\r\n\r\n filefmt = \"%(asctime)-15s %(levelname)s %(message)s\"\r\n filedatefmt = \"%a %d %b %Y %H:%M:%S\"\r\n fileformatter = logging.Formatter(filefmt, filedatefmt)\r\n\r\n sfmt = \"%(asctime)-15s %(levelname)s %(message)s\"\r\n sdatefmt = \"%d %b %H:%M\"\r\n sformatter = logging.Formatter(sfmt, sdatefmt)\r\n if config['state'] is None or config['state'].lower() == 'info':\r\n level = logging.INFO\r\n elif config['state'].lower() == 'debug':\r\n level = logging.DEBUG\r\n elif config['state'].lower() == 'error':\r\n level = logging.ERROR\r\n elif config['state'].lower() == 'warning':\r\n level = logging.WARNING\r\n elif config['state'].lower() == 'critical':\r\n level = logging.CRITICAL\r\n else:\r\n level = logging.INFO\r\n fh = logging.FileHandler(logfilepath)\r\n fh.setLevel(level)\r\n fh.setFormatter(fileformatter)\r\n\r\n sh = logging.StreamHandler()\r\n sh.setLevel(level)\r\n sh.setFormatter(sformatter)\r\n\r\n logging.basicConfig(\r\n level=level,\r\n handlers=[fh, sh]\r\n )\r\n\r\n\r\ndef quick_start(config):\r\n dataset = Dataset(config)\r\n dataset._load_train_and_valid_dataset(file=config[\"train_file\"])\r\n dataset._load_test_dataset(file=config['test_file'])\r\n dataloader = Dataloader(config,dataset)\r\n if config['model'] == \"reducenet\":\r\n model = ReduceNet(config,dataloader)\r\n elif config['model'] == \"vistanet\":\r\n model = VistaNet(config,dataloader)\r\n elif config['model'] == \"vgg\":\r\n model = VggNet(config)\r\n elif config['model'] == 'textnet':\r\n model = TextNet(config,dataloader)\r\n if config['device']:\r\n model = model.to(config['device'])\r\n evaluator = Evaluator(dataloader.pretrained_tokenizer)\r\n trainer = SupervisedTrainer(config,model,dataloader,evaluator)\r\n if config['training_resume'] or config['resume']:\r\n trainer._load_checkpoint()\r\n init_logger(config)\r\n logger = logging.getLogger()\r\n logger.info(model)\r\n trainer.fit()\r\n\r\n\r\n\r\nif __name__ 
== '__main__':\r\n # os.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\r\n modelname = \"textnet\" #vistanet,vggnet,textnet,reducenet\r\n #vistanet is the multimodal model.\r\n #vggnet is img only and the textnet is text only.\r\n f = open('config_settings/config_'+modelname+\".json\")\r\n config = json.load(f)\r\n torch.cuda.empty_cache()\r\n quick_start(config)","repo_name":"zhaoyib/img_and_text_sentiment_classification","sub_path":"quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"31246493817","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 8 14:31:19 2021\n\n@author: mirko\n\"\"\"\nimport os\nimport pandas as pd\nimport numpy as np\nfrom scipy.interpolate import CubicSpline\n\ndef read_ais(location_name):\n \"\"\"\n Function for reading the AIS data.\n input: str format number denoting the sound monitoring location \"20\", \"21\", \"22\", \"23\"\n output: pandas dataframe containing all the AIS data from one monitoring location\n \"\"\"\n ais_dir = 'AIS_BIASstations' # Directory with monitoring location AIS data folders\n # List locations' monthly AIS data files\n ais_loc_path = os.path.join(ais_dir, 'BIAS' + location_name)\n # Filter out files except .txt files\n files_list = list(filter(lambda k: '.txt' in k, os.listdir(ais_loc_path)))\n k = 0\n # Loop through the monthly AIS data files\n for i_file in files_list:\n i_file = os.path.join(ais_loc_path, i_file) # Construct file path for read\n data = pd.read_table(i_file, sep=';') # Read the data from the file\n daytime = data.DAY + ' ' + data.TIME # Add the date and time together into one timestamp\n\n # Construct a new dataframe with new variable names from the read data\n ais_data = pd.DataFrame(data={'Time': daytime, 'Latitude': data.LAT,\n 'Longitude': data.LON, 'MMSI': data.MMSI,\n 'Vs': data.Vs, 'Ship_type': data.TYPEc,\n 'L1': data.L1, 'L2': data.L2, 'B1': data.B1,\n 'B2': data.B2, 'Draught': data.DRAUGHT})\n\n # Convert the timestamp to the datetime format\n ais_data.Time = pd.to_datetime(ais_data.Time, format='%d-%m-%Y %H:%M:%S', utc=True)\n # Add the AIS data from one location together into one variable\n if k == 0:\n ais_data_l = ais_data.copy() # Create a file to append first iteration data\n k = 1\n else:\n ais_data_l = ais_data_l.append(ais_data) # Append to the data frame\n return ais_data_l # return the pandas dataframe only\n\n# from shapely.geometry import Point\n# from distance_az import calc_los_area_polygon\n# def read_spacial_filt_ais(location_name):\n# \"\"\"\n# Function for spacial filtering the AIS data.\n# input: str format number denoting the sound monitoring location \"20\", \"21\", \"22\", \"23\"\n# output: pandas dataframe containing all the AIS data from one monitoring location with\n# all points outside 20 km or behind land excluded\n# \"\"\"\n# ais_data_l = read_ais(location_name)\n# # If MMSI appears only once discard the data\n# ais_data_l = ais_data_l[ais_data_l.groupby('MMSI').MMSI.transform(len) > 1]\n\n# #---Spacial filter----\n# dep_meta_tab = pd.read_table('dep_meta_data.txt', sep=' ')\n# dep_meta_tab_st = dep_meta_tab[dep_meta_tab[\"station\"] == int(location_name)]\n# dep_lat_mean = dep_meta_tab_st['Latitude'].mean()\n# dep_lon_mean = dep_meta_tab_st['Longitude'].mean()\n\n# los_poly = calc_los_area_polygon(dep_lat_mean, dep_lon_mean)\n# filt = np.zeros(len(ais_data_l), dtype=bool)\n\n# for i in 
np.arange(len(ais_data_l)):\n# ship_point = Point(ais_data_l.Longitude.values[i], ais_data_l.Latitude.values[i])\n# filt[i] = los_poly.contains(ship_point)\n\n# filt = pd.Series(filt, name='bools')\n# ais_data_l = ais_data_l[filt.values]\n# return ais_data_l\n\n\ndef read_add_voyages_ais(location_name):\n \"\"\"\n Function for reading the AIS data and adding the voyage numbers to the data\n input: str format number denoting the sound monitoring location \"20\", \"21\", \"22\", \"23\"\n output: pandas dataframe containing all the AIS data from one monitoring location with the\n Voyages variable listing the voyage numbers for different MMSI-s\n \"\"\"\n ais_data_l = read_ais(location_name) # read_ais function for reading the AIS data\n # Remove the data a ship has only one row in the dataframe\n ais_data_l = ais_data_l[ais_data_l.groupby('MMSI').MMSI.transform(len) > 1]\n\n ships = ais_data_l.MMSI.unique() # Make a list of unique MMSI numbers\n time_thresh = int(20*60) # Set the time threshold for comparing the data in int(min*seconds)\n time_dif_min = 15 # For filtering out points closer than 15 seconds in time\n # Loop through all the different MMSI numbers in the data\n for one_ship in ships:\n # Create dataframe with on MMSI AIS data only\n ais_one_ship = ais_data_l[ais_data_l.MMSI == one_ship]\n # Sort the AIS data according to time\n ais_one_ship = ais_one_ship.sort_values(by='Time')\n\n i_no = 0 # Assign numbers to ship voyages, the first voyage is numbered 0\n voyage_no = [0] * len(ais_one_ship) # Make list zeros for voyage numbers\n # Create empty list for filtering points closer > 15 s in time\n filter_ls = list(bytearray(len(ais_one_ship)))\n filter_ls[0] = True # Set first to be True\n # Loop through all the AIS data points from a single ship\n for i in range(1, len(ais_one_ship)):\n # Calculate the time difference between two consecutive AIS data points in UNIX time\n i_time_dif = int(ais_one_ship.Time.values[i])/10.**9 - int(ais_one_ship.Time.values[i-1])/10.**9\n # If the time difference between two consecutive points is larger\n # than the time threshold the second point is from a new voyage of a ship\n if i_time_dif > time_thresh:\n i_no += 1\n voyage_no[i] = i_no # Add the voyage number to the list of voyages\n # Check a wether points close in time and add result to list\n filter_ls[i] = i_time_dif > time_dif_min\n # Add the voyage numbers to the dataframe of the single ship\n ais_one_ship['Voyage'] = pd.Series(voyage_no, index=ais_one_ship.index)\n ais_one_ship = ais_one_ship[filter_ls] # Filtering points closer > 15 s in time\n # If ships voyage only has one data point discard the data\n ais_one_ship = ais_one_ship[ais_one_ship.groupby('Voyage').Voyage.transform(len) > 1]\n if one_ship == ships[0]:\n ais_data_voy_l = ais_one_ship.copy() # Create a df to append first iteration data\n else:\n ais_data_voy_l = ais_data_voy_l.append(ais_one_ship) # Append to the df\n return ais_data_voy_l # return the pandas dataframe only\n\n\ndef read_interpolate_ais(location_name):\n \"\"\"\n Function that reads AIS data, adds voyage no-s and interpolates the ship locations\n for a regular 20 second time step\n input: str format number denoting the sound monitoring location \"20\", \"21\", \"22\", \"23\"\n output: ais_data_l - pandas dataframe containing interpolated AIS data with 20 s time steps\n ais_voyages_l - pandas dataframe containing ships voyages data length, width, draught\n \"\"\"\n # read_add_voyages_ais function for reading the AIS data and adding voyage numbers\n 
ais_data_voy_l = read_add_voyages_ais(location_name)\n # Remove data if a voyage has less than 3 points\n ais_data_voy_l = ais_data_voy_l[ais_data_voy_l.groupby('Voyage').Voyage.transform(len) > 3]\n # Make new variable MMSI_Voy listing MMSI number and voyage number together in str format\n ais_data_voy_l[\"MMSI_Voy\"] = ais_data_voy_l[\"MMSI\"].astype(\"str\") + ' ' + ais_data_voy_l[\"Voyage\"].astype(\"str\")\n # Make a list of unique MMSI_Voy numbers\n ships_voys = ais_data_voy_l.MMSI_Voy.unique()\n\n count = 1 # Set counter for tracking progress\n # Loop through all the different MMSI_Voy-s in the data\n for one_voy in ships_voys:\n print(count, '/', len(ships_voys)) # Show progress by printing number of MMSI_Voy\n count += 1\n # Subset data to be from one voyage of a single ship\n ais_one_ship_voy = ais_data_voy_l[ais_data_voy_l.MMSI_Voy == one_voy]\n # Convert the timestamps from dateimetime to UNIX time\n time = ais_one_ship_voy.Time.values.astype(int)/10.**9\n # Fit cubic splines through the existing Longitude and time data\n csx = CubicSpline(time, ais_one_ship_voy.Longitude.values)\n # Fit cubic splines through the existing Latitude and time data\n csy = CubicSpline(time, ais_one_ship_voy.Latitude.values)\n\n # Creata an array with one second unix time stamps from first to last point of one voyage of a ship\n n_time = np.arange(int(ais_one_ship_voy.Time.values[0])/10.**9,\n int(ais_one_ship_voy.Time.values[-1])/10.**9 + 1)\n\n # Create a function select 20 sec multiple time stamps from 1 sec time stamps\n # t is True if the remainder when divided by 100 (i.e. the last two digits)\n # are in a list [0,20,40,80]\n sec_20_func = lambda t: t % 100 in [0, 20, 40, 60, 80]\n v_sec_20_func = np.vectorize(sec_20_func) # Vectorize the functions\n\n n_time_20 = n_time[v_sec_20_func(n_time)] # Select only timestamps for seconds 0,20,40\n lons_20 = csx(n_time_20).round(8) # Int. Lons for 20 sec time steps, round 8 digits\n lats_20 = csy(n_time_20).round(8) # Int. 
Lats for 20 sec time steps, round 8 digits\n\n # Put the interpolated data from one ships voyage in a single pd df\n ais_data = pd.DataFrame(data={'Time': n_time_20, 'Latitude': lats_20, 'Longitude': lons_20,\n 'MMSI': ais_one_ship_voy.MMSI.values[0],\n 'Voyage': ais_one_ship_voy.Voyage.values[0]})\n # Put the one ship voyage data of listed ship dimensions and draught in another pd df\n ais_voyages = ais_one_ship_voy[[\"MMSI\", \"Ship_type\", \"L1\", \"L2\", \"B1\", \"B2\", \"Voyage\"]]\n ais_voyages = ais_voyages.drop_duplicates() # Drop duplicate rows from the df\n\n # Append if ship had more than one voyage passed the monitoring location\n if one_voy == ships_voys[0]:\n ais_data_l = ais_data.copy() # Create a df to append first iteration data\n ais_voyages_l = ais_voyages.copy() # Create a df to append first iteration data\n else:\n ais_data_l = ais_data_l.append(ais_data) # Append to the df\n ais_voyages_l = ais_voyages_l.append(ais_voyages) # Append to the df\n return ais_voyages_l, ais_data_l # Return the pd df of ship positions and voyages data only\n\n# locations = ['20', '21', '22', '23']\n# ais_voyages_data, ais_int_data = read_interpolate_ais(locations[3])\n","repo_name":"mirkomustonen/UWSoundCatDataProc","sub_path":"AIS_data_processing/read_interp_ais_data.py","file_name":"read_interp_ais_data.py","file_ext":"py","file_size_in_byte":10449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"10511739738","text":"import discord\nfrom discord.ext import commands\nfrom keep_alive import keep_alive\nfrom mcstatus import MinecraftServer\nimport os\nfrom discord.ext.commands import has_permissions, MissingPermissions\n\n\nbot = commands.Bot(command_prefix='!')\n\nstatus = \"Online\"\n\n@bot.event\nasync def on_ready():\n print('Ready!')\n (print('I am running on ' + bot.user.name))\n await bot.change_presence(activity=discord.Game(name=\"hello i am watching you\"))\n\n@bot.command(description=\"Shows bot latency\")\nasync def ping(ctx):\n await ctx.send(f\"Pong! 
{format(round(bot.latency, 1))}\")\n \n\n@bot.command(description=\"Kicks a user\")\n@has_permissions(kick_members=True)\nasync def kick(ctx, member : discord.Member , *, reason = None):\n if ctx.message.author.guild_permissions.kick_members:\n if reason == None:\n reason=f'Kicked by {ctx.author}'\n embed = discord.Embed(title=f\"Kicked user {member}\", description=f\"Reason: {reason}\", color = 0xFFFFF)\n await member.kick(reason=reason)\n await ctx.send(embed=embed)\n else:\n await ctx.channel.send('Insufficent Permissions!')\n\n@bot.command(description=\"Banishes a user\")\n@has_permissions(ban_members=True)\nasync def ban(ctx, member : discord.Member , *, reason = None):\n if reason == None:\n reason=f'Banned by {ctx.author}'\n embed = discord.Embed(title=f\"Banned user {member}\", description=f\"Reason: {reason}\", color = 0xFFFFF)\n await member.ban(reason=reason)\n await ctx.send(embed=embed)\n \n@bot.command(description=\"Mutes a user\")\n@has_permissions(manage_messages=True)\nasync def mute(self, ctx, member : discord.Member, *, reason = None):\n guild = ctx.guild\n mutedRole = discord.utils.get(guild.roles, name=\"Muted\")\n\n if not mutedRole:\n mutedRole = await guild.create_role(name=\"Muted\")\n\n for channel in guild.channels:\n await channel.set_permissions(mutedRole, speak=False, send_messages = False)\n await member.add_roles(mutedRole)\n\n embed = discord.Embed(title=f\"\\n\\nSucess!\",description=f\" Muted {member.mention} for {reason}.\", color=0xFFFFF)\n\n author = ctx.message.author\n pfp = author.avatar_url\n\n embed.set_author(name=author, icon_url=pfp)\n await ctx.send(embed=embed)\n\n@bot.command(description=\"Displays some info about a user\")\n\nasync def profile(ctx, member : discord.Member = None):\n if member == None:\n member = ctx.author\n embed = discord.Embed(title=f\"Profile of user {member}\", description=\"\", color=0xFFFFF)\n embed.add_field(name=\"User ID\", value=member.id, inline=True)\n embed.add_field(name=\"Date Joined\", value=member.joined_at, inline=True)\n await ctx.send(embed=embed)\n\n@bot.command(description=\"Mass delete messages!\")\n@has_permissions(manage_messages=True)\nasync def purge(ctx, amount = 1):\n if amount > 100:\n await ctx.message.delete()\n await ctx.channel.send('You cannot purge more than 100 messages!', delete_after=3)\n return\n await ctx.message.delete()\n await ctx.channel.purge(limit=amount)\n embed = discord.Embed(description=f\"Purged {amount} message(s)\", color=0xFFFFF)\n await ctx.send(embed=embed, delete_after=3)\n\n@bot.command(description=\"Say Hi!\")\nasync def hello(ctx):\n await ctx.message.delete()\n await ctx.channel.send(\"Hello! I'm happy to be here. 
:)\")\n\n@bot.command(description='Displays info about the minecraft server!')\nasync def mcinfo(ctx):\n embed = discord.Embed(title='AzaleaMC Server Info', color=0xFFFFF)\n embed.add_field(name=\"IP\", value=\"play.azaleamc.org\", inline=True)\n embed.add_field(name=\"Status\", value=status, inline=True)\n await ctx.send(embed=embed)\n\n@bot.command(description=\"Warns the user mentioned\")\n@has_permissions(manage_messages=True)\nasync def warn(ctx, member : discord.Member , *, reason=None):\n if reason == None:\n reason=f'Warned by {ctx.author}'\n await member.send(f\"You have been warned for: {reason}\")\n embed = discord.Embed(title=f\"Warned {member} for reason: {reason}\", color=0xFFFFF)\n await ctx.send(embed=embed)\n\n@bot.command(description=\"Shows MC Server Status\")\nasync def status(ctx):\n server = MinecraftServer.lookup(\"184.95.34.74:25636\")\n\n status = server.status()\n embed = discord.Embed(title=\"Server Status\", color=0xFFFFF)\n embed.add_field(name=\"Players Online\", value=f\"{status.players.online}\", inline=False)\n embed.add_field(name=\"Latency (if 0 down for maintenance)\", value = round(status.latency, 1),inline=False)\n\n await ctx.send(embed=embed)\n\n\n@bot.command(description=\"Sets the status\")\n@has_permissions(ban_members=True)\nasync def setstatus(ctx, str):\n status = str\n ctx.send(\"Sucessfully set status!\")\nkeep_alive()\ntoken = os.environ['TOKEN']\nbot.run(token)","repo_name":"reflqct/Daisy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"44050116410","text":"import os\nimport requests\nfrom datetime import datetime\nfrom backend import send_email\n\nAPI_KEY = os.getenv(\"NEWS_ORG_API_KEY\")\nDATE = datetime.today().strftime(\"%Y-%m-%d\")\nENDPOINT = \"https://newsapi.org/v2/everything\"\nHEADER = {\"X-Api-Key\": API_KEY}\n\n\ndef get_news(topic: str, date, num_results=5):\n \"\"\"Gets the specified number of most recent internship news from news_org\"\"\"\n params = {\n \"q\": topic,\n \"date\": date,\n \"pageSize\": num_results\n }\n request = requests.get(url=ENDPOINT, headers=HEADER, params=params)\n request.raise_for_status()\n response = request.json()\n news_list = (response[\"articles\"])\n return news_list\n\n\ndef main():\n articles = get_news(topic=\"internships\", date=DATE)\n news_result = [\n {\n \"title\": article[\"title\"],\n \"url\": article[\"url\"],\n \"description\": article[\"description\"]\n } for article in articles\n ]\n\n messages = []\n for news in news_result:\n title = news[\"title\"]\n url = news[\"url\"]\n description = news[\"description\"]\n message = f\"\"\"\n <html>\n <body>\n <h2>HEADLINE: {title}</h2>\n <p>{description}</p>\n <p>Read more: <a href=\"{url}\">{url}</a></p>\n </body>\n </html>\n \"\"\"\n messages.append(message)\n\n full_message = \"\\n\".join(messages)\n send_email(full_message)\n\nif __name__ == \"__main__\":\n main()","repo_name":"Earnstein/python-fiesta","sub_path":"UpdateFlow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"43150055858","text":"from __future__ import absolute_import\nfrom builtins import zip\nfrom builtins import map\nfrom builtins import range\n\n###############################################################################\n# lazyflow: data flow based lazy parallel computation framework\n#\n# 
Copyright (C) 2011-2014, the ilastik developers\n# <team@ilastik.org>\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the Lesser GNU General Public License\n# as published by the Free Software Foundation; either version 2.1\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the\n# GNU Lesser General Public License version 2.1 and 3 respectively.\n# This information is also available on the ilastik web site at:\n# http://ilastik.org/license/\n###############################################################################\n# Python\nimport logging\n\ntraceLogger = logging.getLogger(\"TRACE.\" + __name__)\n\n# SciPy\nimport numpy\n\n# lazyflow\nfrom lazyflow.graph import Operator, InputSlot, OutputSlot, OrderedSignal, OperatorWrapper\nfrom lazyflow.roi import sliceToRoi, roiToSlice, getIntersection, roiFromShape, nonzero_bounding_box, enlargeRoiForHalo\nfrom lazyflow.utility import Timer\nfrom lazyflow.classifiers import (\n LazyflowVectorwiseClassifierABC,\n LazyflowVectorwiseClassifierFactoryABC,\n LazyflowPixelwiseClassifierABC,\n LazyflowPixelwiseClassifierFactoryABC,\n)\n\n\nfrom lazyflow.operators.opFeatureMatrixCache import OpFeatureMatrixCache\nfrom lazyflow.utility.helpers import bigintprod\nfrom .utils import slic_to_mask\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpTrainSupervoxelClassifierBlocked(Operator):\n \"\"\"\n Owns two child training operators, for 'vectorwise' and 'pixelwise' classifier types.\n Chooses which one to use based on the type of ClassifierFactory provided as input.\n \"\"\"\n\n Images = InputSlot(level=1)\n SupervoxelSegmentation = InputSlot(level=1)\n Labels = InputSlot(level=1)\n SupervoxelFeatures = InputSlot(level=1)\n SupervoxelLabels = InputSlot(level=1)\n ClassifierFactory = InputSlot()\n nonzeroLabelBlocks = InputSlot(level=1) # Used only in the pixelwise case.\n\n Classifier = OutputSlot()\n\n def __init__(self, *args, **kwargs):\n super(OpTrainSupervoxelClassifierBlocked, self).__init__(*args, **kwargs)\n self.progressSignal = OrderedSignal()\n self._mode = None\n\n # Fully connect the vectorwise training operator\n self._opVectorwiseTrain = OpTrainSupervoxelwiseClassifierBlocked(parent=self)\n self._opVectorwiseTrain.Images.connect(self.Images)\n self._opVectorwiseTrain.SupervoxelSegmentation.connect(self.SupervoxelSegmentation)\n self._opVectorwiseTrain.SupervoxelFeatures.connect(self.SupervoxelFeatures)\n self._opVectorwiseTrain.SupervoxelLabels.connect(self.SupervoxelLabels)\n self._opVectorwiseTrain.Labels.connect(self.Labels)\n self._opVectorwiseTrain.ClassifierFactory.connect(self.ClassifierFactory)\n self._opVectorwiseTrain.progressSignal.subscribe(self.progressSignal)\n\n # # Fully connect the pixelwise training operator\n # self._opPixelwiseTrain = OpTrainPixelwiseClassifierBlocked(parent=self)\n # self._opPixelwiseTrain.Images.connect(self.Images)\n # self._opPixelwiseTrain.Labels.connect(self.Labels)\n # self._opPixelwiseTrain.ClassifierFactory.connect(self.ClassifierFactory)\n # self._opPixelwiseTrain.nonzeroLabelBlocks.connect(self.nonzeroLabelBlocks)\n # self._opPixelwiseTrain.MaxLabel.connect(self.MaxLabel)\n # 
self._opPixelwiseTrain.progressSignal.subscribe(self.progressSignal)\n\n def setupOutputs(self):\n # Construct an inner operator depending on the type of classifier we'll be creating.\n classifier_factory = self.ClassifierFactory.value\n if issubclass(type(classifier_factory), LazyflowVectorwiseClassifierFactoryABC):\n new_mode = \"vectorwise\"\n else:\n raise Exception(\"Unknown classifier factory type: {}\".format(type(classifier_factory)))\n\n if new_mode == self._mode:\n return\n\n self.Classifier.disconnect()\n self._mode = new_mode\n\n if self._mode == \"vectorwise\":\n self.Classifier.connect(self._opVectorwiseTrain.Classifier)\n elif self._mode == \"pixelwise\":\n raise RuntimeError(\"shouldn't be here\")\n # self.Classifier.connect(self._opPixelwiseTrain.Classifier)\n\n def execute(self, slot, subindex, roi, result):\n assert False, \"Shouldn't get here...\"\n\n def propagateDirty(self, slot, subindex, roi):\n pass\n\n\nclass OpTrainSupervoxelwiseClassifierBlocked(Operator):\n Images = InputSlot(level=1)\n SupervoxelSegmentation = InputSlot(level=1)\n Labels = InputSlot(level=1)\n SupervoxelFeatures = InputSlot(level=1)\n SupervoxelLabels = InputSlot(level=1)\n ClassifierFactory = InputSlot()\n\n Classifier = OutputSlot()\n\n # Images[N] --- MaxLabel ------\n # \\ \\\n # Labels[N] --> opFeatureMatrixCaches ---(FeatureImage[N])---> opConcatenateFeatureImages ---(label+feature matrix)---> OpTrainFromFeatures ---(Classifier)--->\n\n def __init__(self, *args, **kwargs):\n super(OpTrainSupervoxelwiseClassifierBlocked, self).__init__(*args, **kwargs)\n self.progressSignal = OrderedSignal()\n\n self._opFeatureMatrixCaches = OperatorWrapper(OpFeatureMatrixCache, parent=self)\n self._opFeatureMatrixCaches.LabelImage.connect(self.Labels)\n self._opFeatureMatrixCaches.FeatureImage.connect(self.Images)\n\n # self._opConcatenateFeatureMatrices = OpConcatenateFeatureMatrices(parent=self)\n # self._opConcatenateFeatureMatrices.FeatureMatrices.connect(self._opFeatureMatrixCaches.LabelAndFeatureMatrix)\n # self._opConcatenateFeatureMatrices.ProgressSignals.connect(self._opFeatureMatrixCaches.ProgressSignal)\n\n self._opTrainFromFeatures = OpTrainClassifierFromFeatureVectorsAndSupervoxelMask(parent=self)\n self._opTrainFromFeatures.ClassifierFactory.connect(self.ClassifierFactory)\n self._opTrainFromFeatures.LabelAndFeatureMatrix.connect(self._opFeatureMatrixCaches.LabelAndFeatureMatrix)\n self._opTrainFromFeatures.SupervoxelSegmentation.connect(self.SupervoxelSegmentation)\n self._opTrainFromFeatures.SupervoxelFeatures.connect(self.SupervoxelFeatures)\n self._opTrainFromFeatures.SupervoxelLabels.connect(self.SupervoxelLabels)\n self._opTrainFromFeatures.Labels.connect(self.Labels)\n self._opTrainFromFeatures.Images.connect(self.Images)\n\n self.Classifier.connect(self._opTrainFromFeatures.Classifier)\n\n # Progress reporting\n # def _handleFeatureProgress(progress):\n # # Note that these progress messages will probably appear out-of-order.\n # # See comments in OpFeatureMatrixCache\n # logger.debug(\"Training: {:02}% (Computing features)\".format(int(progress)))\n # self.progressSignal(0.8*progress)\n # self._opConcatenateFeatureMatrices.progressSignal.subscribe(_handleFeatureProgress)\n\n def _handleTrainingComplete():\n logger.debug(\"Training: 100% (Complete)\")\n self.progressSignal(100.0)\n\n self._opTrainFromFeatures.trainingCompleteSignal.subscribe(_handleTrainingComplete)\n\n def cleanUp(self):\n self.progressSignal.clean()\n self.Classifier.disconnect()\n 
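# hand the remaining teardown off to the Operator base class\n 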
super(OpTrainSupervoxelwiseClassifierBlocked, self).cleanUp()\n\n def setupOutputs(self):\n pass # Nothing to do; our output is connected to an internal operator.\n\n def execute(self, slot, subindex, roi, result):\n assert False, \"Shouldn't get here...\"\n\n def propagateDirty(self, slot, subindex, roi):\n pass\n\n\nclass OpTrainClassifierFromFeatureVectorsAndSupervoxelMask(Operator):\n ClassifierFactory = InputSlot()\n LabelAndFeatureMatrix = InputSlot(level=1)\n SupervoxelSegmentation = InputSlot(level=1)\n Labels = InputSlot(level=1)\n Images = InputSlot(level=1)\n SupervoxelFeatures = InputSlot(level=1)\n SupervoxelLabels = InputSlot(level=1)\n\n Classifier = OutputSlot()\n\n def __init__(self, *args, **kwargs):\n super(OpTrainClassifierFromFeatureVectorsAndSupervoxelMask, self).__init__(*args, **kwargs)\n self.trainingCompleteSignal = OrderedSignal()\n\n # TODO: Progress...\n # self.progressSignal = OrderedSignal()\n\n def setupOutputs(self):\n self.Classifier.meta.dtype = object\n self.Classifier.meta.shape = (1,)\n\n # Special metadata for downstream operators using the classifier\n self.Classifier.meta.classifier_factory = self.ClassifierFactory.value\n\n def execute(self, slot, subindex, roi, result):\n channel_names = self.LabelAndFeatureMatrix.meta.channel_names\n # labels_and_features = self.LabelAndFeatureMatrix[0].value\n # featMatrix = labels_and_features[:, 1:]\n # labelsMatrix = labels_and_features[:, 0:1].astype(numpy.uint32)\n supervoxelSegmentationMatrix = self.SupervoxelSegmentation[0].value\n\n # if featMatrix.shape[0] < maxLabel:\n # # If there isn't enough data for the random forest to train with, return None\n # result[:] = None\n # self.trainingCompleteSignal()\n # return\n\n classifier_factory = self.ClassifierFactory.value\n assert issubclass(type(classifier_factory), LazyflowVectorwiseClassifierFactoryABC), (\n \"Factory is of type {}, which does not satisfy the LazyflowVectorwiseClassifierFactoryABC interface.\"\n \"\".format(type(classifier_factory))\n )\n\n # featMatrix = featMatrix.reshape(list(self.Images[0].value.shape[:3])+[featMatrix.shape[1]])\n # supervoxelFeatures = get_supervoxel_features(self.Images[0].value, supervoxelSegmentationMatrix)\n # supervoxelLabels = get_supervoxel_labels(self.Labels[0].value, supervoxelSegmentationMatrix)\n # indices = numpy.arange(supervoxelFeatures.shape[0])\n\n supervoxelFeatures = self.SupervoxelFeatures[0].value\n supervoxelLabels = self.SupervoxelLabels[0].value\n mask = numpy.where(supervoxelLabels != 0)\n supervoxelFeatures = supervoxelFeatures[mask]\n supervoxelLabels = supervoxelLabels[mask]\n # import ipdb; ipdb.set_trace()\n # print(\"features before training {}\".format(supervoxelFeatures))\n classifier = classifier_factory.create_and_train(supervoxelFeatures, supervoxelLabels, channel_names)\n result[0] = classifier\n if classifier is not None:\n assert issubclass(type(classifier), LazyflowVectorwiseClassifierABC), (\n \"Classifier is of type {}, which does not satisfy the LazyflowVectorwiseClassifierABC interface.\"\n \"\".format(type(classifier))\n )\n\n self.trainingCompleteSignal()\n return result\n\n def propagateDirty(self, slot, subindex, roi):\n self.Classifier.setDirty()\n\n\nclass OpSupervoxelClassifierPredict(Operator):\n Image = InputSlot()\n LabelsCount = InputSlot()\n Classifier = InputSlot()\n SupervoxelSegmentation = InputSlot()\n\n # An entire prediction request is skipped if the mask is all zeros for the requested roi.\n # Otherwise, the request is serviced as usual and the mask is 
ignored.\n PredictionMask = InputSlot(optional=True)\n SupervoxelFeatures = InputSlot()\n\n PMaps = OutputSlot()\n\n def __init__(self, *args, **kwargs):\n super(OpSupervoxelClassifierPredict, self).__init__(*args, **kwargs)\n self._mode = None\n self._prediction_op = None\n\n def setupOutputs(self):\n # Construct an inner operator depending on the type of classifier we'll be using.\n # We don't want to access the classifier directly here because that would trigger the full computation already.\n # Instead, we require the factory to be passed along with the classifier metadata.\n\n try:\n classifier_factory = self.Classifier.meta.classifier_factory\n except KeyError:\n raise Exception(\"Classifier slot must include classifier factory as metadata.\")\n\n if issubclass(classifier_factory.__class__, LazyflowVectorwiseClassifierFactoryABC):\n new_mode = \"vectorwise\"\n # elif issubclass(classifier_factory.__class__, LazyflowPixelwiseClassifierFactoryABC):\n # new_mode = 'pixelwise'\n else:\n raise Exception(\"Unknown classifier factory type: {}\".format(type(classifier_factory)))\n\n if new_mode == self._mode:\n return\n\n if self._mode is not None:\n self.PMaps.disconnect()\n self._prediction_op.cleanUp()\n self._mode = new_mode\n\n if self._mode == \"vectorwise\":\n self._prediction_op = OpSupervoxelwiseClassifierPredict(parent=self)\n elif self._mode == \"pixelwise\":\n raise RuntimeError(\"shouldn't be here\")\n\n self._prediction_op.PredictionMask.connect(self.PredictionMask)\n self._prediction_op.Image.connect(self.Image)\n self._prediction_op.LabelsCount.connect(self.LabelsCount)\n self._prediction_op.Classifier.connect(self.Classifier)\n self._prediction_op.SupervoxelSegmentation.connect(self.SupervoxelSegmentation)\n self._prediction_op.SupervoxelFeatures.connect(self.SupervoxelFeatures)\n self.PMaps.connect(self._prediction_op.PMaps)\n\n def execute(self, slot, subindex, roi, result):\n assert False, \"Shouldn't get here...\"\n\n def propagateDirty(self, slot, subindex, roi):\n if slot == self.Classifier:\n self.PMaps.setDirty()\n\n\nclass OpSupervoxelwiseClassifierPredict(Operator):\n Image = InputSlot()\n LabelsCount = InputSlot()\n Classifier = InputSlot()\n SupervoxelSegmentation = InputSlot()\n SupervoxelFeatures = InputSlot()\n\n # An entire prediction request is skipped if the mask is all zeros for the requested roi.\n # Otherwise, the request is serviced as usual and the mask is ignored.\n PredictionMask = InputSlot(optional=True)\n\n PMaps = OutputSlot()\n\n def __init__(self, *args, **kwargs):\n super(OpSupervoxelwiseClassifierPredict, self).__init__(*args, **kwargs)\n\n # Make sure the entire image is dirty if the prediction mask is removed.\n self.PredictionMask.notifyUnready(lambda s: self.PMaps.setDirty())\n\n def setupOutputs(self):\n assert self.Image.meta.getAxisKeys()[-1] == \"c\"\n\n nlabels = max(self.LabelsCount.value, 1) # we'll have at least 2 labels once we actually predict something\n # not setting it to 0 here is friendlier to possible downstream\n # ilastik operators, setting it to 2 causes errors in pixel classification\n # (live prediction doesn't work when only two labels are present)\n\n self.PMaps.meta.assignFrom(self.Image.meta)\n self.PMaps.meta.dtype = numpy.float32\n self.PMaps.meta.shape = self.Image.meta.shape[:-1] + (\n nlabels,\n ) # FIXME: This assumes that channel is the last axis\n self.PMaps.meta.drange = (0.0, 1.0)\n\n self.Image.meta.ideal_blockshape = self.Image.meta.shape\n self.PMaps.meta.ideal_blockshape = self.Image.meta.shape\n # 
ideal_blockshape = self.Image.meta.ideal_blockshape\n # if ideal_blockshape is None:\n # ideal_blockshape = (0,) * len(self.Image.meta.shape)\n # ideal_blockshape = list(ideal_blockshape)\n # ideal_blockshape[-1] = self.PMaps.meta.shape[-1]\n # self.PMaps.meta.ideal_blockshape = tuple(ideal_blockshape)\n\n output_channels = nlabels\n input_channels = self.Image.meta.shape[-1]\n # Temporarily consumed RAM includes the following:\n # >> result array: 4 * N output_channels\n # >> (times 2 due to temporary variable)\n # >> input data allocation\n classifier_factory = self.Classifier.meta.classifier_factory\n classifier_ram_per_pixelchannel = classifier_factory.estimated_ram_usage_per_requested_predictionchannel()\n classifier_ram_per_pixel = classifier_ram_per_pixelchannel * output_channels\n feature_ram_per_pixel = max(self.Image.meta.dtype().nbytes, 4) * input_channels\n self.PMaps.meta.ram_usage_per_requested_pixel = classifier_ram_per_pixel + feature_ram_per_pixel\n\n def execute(self, slot, subindex, roi, result):\n classifier = self.Classifier.value\n\n # Training operator may return 'None' if there was no data to train with\n skip_prediction = classifier is None\n\n # Shortcut: If the mask is totally zero, skip this request entirely\n if not skip_prediction and self.PredictionMask.ready():\n mask_roi = numpy.array((roi.start, roi.stop))\n mask_roi[:, -1:] = [[0], [1]]\n start, stop = list(map(tuple, mask_roi))\n mask = self.PredictionMask(start, stop).wait()\n skip_prediction = not numpy.any(mask)\n del mask\n\n if skip_prediction:\n result[:] = 0.0\n return result\n\n assert issubclass(\n type(classifier), LazyflowVectorwiseClassifierABC\n ), \"Classifier is of type {}, which does not satisfy the LazyflowVectorwiseClassifierABC interface.\".format(\n type(classifier)\n )\n\n key = roi.toSlice()\n newKey = key[:-1]\n newKey += (slice(0, self.Image.meta.shape[-1], None),)\n\n with Timer() as features_timer:\n input_data = self.Image[newKey].wait()\n\n input_data = numpy.asarray(input_data, numpy.float32)\n\n shape = input_data.shape\n prod = bigintprod(shape[:-1])\n features = input_data.reshape((prod, shape[-1]))\n features = self.SupervoxelFeatures.value\n # print(\"features before prediction {}\".format(features))\n # features = get_supervoxel_features(features, self.SupervoxelSegmentation.value)\n # import ipdb; ipdb.set_trace()\n with Timer() as prediction_timer:\n probabilities = classifier.predict_probabilities(features)\n # import ipdb; ipdb.set_trace()\n probabilities = slic_to_mask(self.SupervoxelSegmentation.value, probabilities).reshape(\n -1, probabilities.shape[-1]\n )\n logger.debug(\n \"Features took {} seconds, Prediction took {} seconds for roi: {} : {}\".format(\n features_timer.seconds(), prediction_timer.seconds(), roi.start, roi.stop\n )\n )\n\n assert probabilities.shape[1] <= self.PMaps.meta.shape[-1], (\n \"Error: Somehow the classifier has more label classes than expected:\"\n \" Got {} classes, expected {} classes\".format(probabilities.shape[1], self.PMaps.meta.shape[-1])\n )\n\n # We're expecting a channel for each label class.\n # If we didn't provide at least one sample for each label,\n # we may get back fewer channels.\n if probabilities.shape[1] < self.PMaps.meta.shape[-1]:\n # Copy to an array of the correct shape\n # This is slow, but it's an unusual case\n assert probabilities.shape[-1] == len(classifier.known_classes)\n full_probabilities = numpy.zeros(\n probabilities.shape[:-1] + (self.PMaps.meta.shape[-1],), dtype=numpy.float32\n )\n for i, label 
in enumerate(classifier.known_classes):\n full_probabilities[:, label - 1] = probabilities[:, i]\n\n probabilities = full_probabilities\n\n # Reshape to image\n probabilities.shape = shape[:-1] + (self.PMaps.meta.shape[-1],)\n\n # Copy only the prediction channels the client requested.\n result[...] = probabilities[..., roi.start[-1] : roi.stop[-1]]\n return result\n\n def propagateDirty(self, slot, subindex, roi):\n if slot == self.Classifier:\n self.logger.debug(\"classifier changed, setting dirty\")\n self.PMaps.setDirty()\n elif slot in [self.Image, self.PredictionMask, self.SupervoxelFeatures, self.SupervoxelSegmentation]:\n self.PMaps.setDirty()\n","repo_name":"ilastik/ilastik","sub_path":"ilastik/workflows/voxelSegmentation/classifierOperators.py","file_name":"classifierOperators.py","file_ext":"py","file_size_in_byte":20632,"program_lang":"python","lang":"en","doc_type":"code","stars":312,"dataset":"github-code","pt":"94"} +{"seq_id":"12451840363","text":"'''\nCreated on Jul 25, 2017\n\n@author: rlakkimsetti\n'''\nimport sys\n\nfrom xml.dom import minidom\nfrom xml.etree import ElementTree\nfrom xml.etree.ElementTree import Element, SubElement, Comment, tostring\n#from OFTE import TimerCheck\nfrom OFTE import TimerCheck\nfrom OFTE import New\nd={}\nsys.argv = raw_input('Enter command line arguments: ').split()\nfor i in range(len(sys.argv)):\n #print(\"in loop\")\n if(str(sys.argv[i]) == '-dd'):\n destinationDirectory=sys.argv[i+1]\n d.update({'destinationDirectory': destinationDirectory})\n sourceFile=sys.argv[i+2]\n d.update({'sourceFile': sourceFile})\n elif(str(sys.argv[i]) == '-df'):\n destiationFile=sys.argv[i+1]\n d.update({'destiationFile':destiationFile})\n sourceFile=sys.argv[i+2]\n d.update({'sourceFile':sourceFile})\n elif(str(sys.argv[i]) == '-tr'):\n triggerPattern=sys.argv[i+1]\n d.update({'triggerPattern':triggerPattern})\n elif(str(sys.argv[i]) == '-trd'):\n triggerCondition=sys.argv[i+1]\n d.update({'triggerCondition':triggerCondition})\n elif(str(sys.argv[i]) == '-pi'):\n pollInterval=sys.argv[i+1]\n d.update({'pollInterval':pollInterval})\n elif(str(sys.argv[i]) == '-pu'):\n pollUnits=sys.argv[i+1]\n d.update({'pollUnits':pollUnits})\n elif(str(sys.argv[i]) == '-jn'):\n jobName=sys.argv[i+1]\n d.update({'jobName':jobName})\n elif(str(sys.argv[i]) == '-gt'):\n xmlFilePath=sys.argv[i+1]\n d.update({'xmlFilePath':xmlFilePath})\n \nprint(d)\n\ndef prettif(elem):\n \"\"\"Return a pretty-printed XML string for the Element.\n \"\"\"\n rough_string = ElementTree.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")\n\nManagedCall = Element('managedCall')\n\noriginator = SubElement(ManagedCall, 'originator')\n\nhostName = SubElement(originator, 'hostName')\nhostName.text = 'localhost'\n\nuserId = SubElement(originator, 'userId')\nuserId.text = 'miracle'\n\nTransferSet = SubElement(ManagedCall, 'transferSet')\n\nItem = SubElement(TransferSet, 'item')\n\nSource = SubElement(Item, 'source')\n\nSourceFile = SubElement (Source, 'file')\nif 'sourceFile' in d.keys():\n SourceFile.text = d.get('sourceFile')\nSourceFilePattern = SubElement (Source, 'filePattern')\nif 'filePattern' in d.keys():\n SourceFilePattern.text = d.get('filePattern')\nSourceTriggerPattern = SubElement(Source, 'triggerPattern')\nif 'triggerPattern' in d.keys():\n SourceTriggerPattern.text = d.get('triggerPattern')\n\nDestination = SubElement(Item, 'destination')\nDestinationFile = SubElement (Destination, 'file')\nif 'destinationDirectory' in 
d.keys():\n DestinationFile.text = d.get('destinationDirectory')\nDestinationFilePattern = SubElement (Destination, 'filePatternd')\nif 'filePatternd' in d.keys():\n DestinationFilePattern.text = d.get('filePatternd')\nDestinationTriggerPattern = SubElement(Destination, 'triggerPatternd')\nif 'triggerCondition' in d.keys():\n DestinationTriggerPattern.text = d.get('triggerCondition')\n\nPoll = SubElement(TransferSet, 'poll')\nUnits = SubElement(Poll, 'pollUnits')\nif 'pollUnits' in d.keys():\n Units.text = d.get('pollUnits')\nInterval = SubElement(Poll, 'pollInterval')\nif 'pollInterval' in d.keys():\n Interval.text = d.get('pollInterval')\n\nJob = SubElement(ManagedCall, 'job')\nJobName = SubElement(Job, 'jobName')\nif 'jobName' in d.keys():\n JobName.text = d.get('jobName')\n\nif __name__ == '__main__':\n # print(tostring(ManagedCall))\n file = open(\"D:\\Test\\PythonMonitors\\monitortest6.xml\", \"w\")\n file.write(prettif(ManagedCall))\n file.close()\n print (prettif(ManagedCall))\n #pass\n # TimerCheck.timer(d)\n New.timer(d)","repo_name":"MithunThadi/OFTE","sub_path":"SourceCode/python_ofte frame work/ConsoleParameter2.py","file_name":"ConsoleParameter2.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9231049571","text":"import gtsam\nfrom DataSets.extractData import ROSData\nfrom DataSets.extractGt import GroundTruthEstimates\nfrom settings import DATASET_NUMBER\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom Sensors.CameraSensor.visualOdometry import VisualOdometry\nfrom Plotting.plot_gtsam import plot_position, plot_angels, plot_threedof\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nclass CameraUwbImuFusion:\n\n def __init__(self) -> None:\n self.dataset = ROSData(DATASET_NUMBER)\n self.ground_truth: GroundTruthEstimates = GroundTruthEstimates(DATASET_NUMBER, pre_initialization=False)\n\n # Tracked variables for IMU and UWB\n self.time_stamps: list = []\n\n # Setting up gtsam values\n self.graph_values: gtsam.Values = gtsam.Values()\n self.factor_graph: gtsam.NonlinearFactorGraph = gtsam.NonlinearFactorGraph()\n self.initialize_graph()\n sns.set()\n\n def initialize_graph(self):\n\n # Pose for pre init\n R_init = R.from_euler(\"xyz\", self.ground_truth.initial_pose(voBruteForce=True)[:3], degrees=False)\n T_init = self.ground_truth.initial_pose(voBruteForce=True)[3:]\n T_init[2] = -0.7\n\n self.initial_state = np.eye(4)\n self.initial_state[:3, :3] = R_init.as_matrix()\n self.initial_state[:3, 3] = T_init.T\n\n # Initialize vo\n self.visual_odometry = VisualOdometry()\n self.visual_odometry.update_scale(0.25)\n\n def run(self):\n iteration_number = 0\n iteration_number_cam = 0\n\n for measurement in self.dataset.generate_measurements():\n\n if measurement.measurement_type.value == \"Camera\":\n self.visual_odometry.track(measurement.image)\n self.time_stamps.append(measurement.time.to_time())\n iteration_number_cam += 1\n print(iteration_number_cam)\n\n iteration_number += 1\n\n if iteration_number_cam > 2000:\n break\n\n states = self.visual_odometry.states\n\n for i in range(len(states)):\n states[i] = self.initial_state @ states[i]\n\n north = np.array([states[i][0, 3] for i in range(len(states))])\n east = np.array([states[i][1, 3] for i in range(len(states))])\n down = np.array([states[i][2, 3] for i in range(len(states))])\n\n roll = np.array([R.from_matrix(states[i][:3, :3]).as_euler(\"xyz\")[0] for i in 
range(len(states))])\n pitch = np.array([R.from_matrix(states[i][:3, :3]).as_euler(\"xyz\")[1] for i in range(len(states))])\n yaw = np.array([R.from_matrix(states[i][:3, :3]).as_euler(\"xyz\")[2] for i in range(len(states))])\n\n # plt.figure(2)\n #plot_position(np.array([north, east, down]).T, self.ground_truth, self.time_stamps, convert_NED=False)\n # plt.figure(3)\n #plot_angels(np.array([roll, pitch, yaw]).T, self.ground_truth, self.time_stamps)\n plot_threedof(np.array([north, east, down]).T, np.array([roll, pitch, yaw]).T, self.ground_truth, self.time_stamps)\n plt.show()\n\n\nfusion = CameraUwbImuFusion()\nfusion.run()\n","repo_name":"iverau/sensorfusionUWB","sub_path":"testVO.py","file_name":"testVO.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"73769984948","text":"from scipy.spatial import ConvexHull\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom tqdm import tqdm \nimport re, itertools\nimport contextlib\n\ndef normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,\n use_relative_movement=False, use_relative_jacobian=False):\n if adapt_movement_scale:\n source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume\n driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume\n adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)\n else:\n adapt_movement_scale = 1\n\n kp_new = {k: v for k, v in kp_driving.items()}\n\n if use_relative_movement:\n kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])\n kp_value_diff *= adapt_movement_scale\n kp_new['value'] = kp_value_diff + kp_source['value']\n\n if use_relative_jacobian:\n jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))\n kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])\n\n return kp_new\n\ndef headpose_pred_to_degree(pred):\n device = pred.device\n idx_tensor = [idx for idx in range(66)]\n idx_tensor = torch.FloatTensor(idx_tensor).type_as(pred).to(device)\n pred = F.softmax(pred)\n degree = torch.sum(pred*idx_tensor, 1) * 3 - 99\n return degree\n\ndef get_rotation_matrix(yaw, pitch, roll):\n yaw = yaw / 180 * 3.14\n pitch = pitch / 180 * 3.14\n roll = roll / 180 * 3.14\n\n roll = roll.unsqueeze(1)\n pitch = pitch.unsqueeze(1)\n yaw = yaw.unsqueeze(1)\n\n pitch_mat = torch.cat([torch.ones_like(pitch), torch.zeros_like(pitch), torch.zeros_like(pitch), \n torch.zeros_like(pitch), torch.cos(pitch), -torch.sin(pitch),\n torch.zeros_like(pitch), torch.sin(pitch), torch.cos(pitch)], dim=1)\n pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3)\n\n yaw_mat = torch.cat([torch.cos(yaw), torch.zeros_like(yaw), torch.sin(yaw), \n torch.zeros_like(yaw), torch.ones_like(yaw), torch.zeros_like(yaw),\n -torch.sin(yaw), torch.zeros_like(yaw), torch.cos(yaw)], dim=1)\n yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3)\n\n roll_mat = torch.cat([torch.cos(roll), -torch.sin(roll), torch.zeros_like(roll), \n torch.sin(roll), torch.cos(roll), torch.zeros_like(roll),\n torch.zeros_like(roll), torch.zeros_like(roll), torch.ones_like(roll)], dim=1)\n roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3)\n\n rot_mat = torch.einsum('bij,bjk,bkm->bim', pitch_mat, yaw_mat, roll_mat)\n\n return rot_mat\n\ndef keypoint_transformation(kp_canonical, he, wo_exp=False):\n kp = kp_canonical['value'] # (bs, k, 3) \n yaw, pitch, roll= he['yaw'], he['pitch'], he['roll'] \n 
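# headpose_pred_to_degree turns the estimator's 66-bin logits into angles in degrees\n 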
yaw = headpose_pred_to_degree(yaw) \n pitch = headpose_pred_to_degree(pitch)\n roll = headpose_pred_to_degree(roll)\n\n if 'yaw_in' in he:\n yaw = he['yaw_in']\n if 'pitch_in' in he:\n pitch = he['pitch_in']\n if 'roll_in' in he:\n roll = he['roll_in']\n\n rot_mat = get_rotation_matrix(yaw, pitch, roll) # (bs, 3, 3)\n\n t, exp = he['t'], he['exp']\n if wo_exp:\n exp = exp*0 \n \n # keypoint rotation\n kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp)\n\n # keypoint translation\n t[:, 0] = t[:, 0]*0\n t[:, 2] = t[:, 2]*0\n t = t.unsqueeze(1).repeat(1, kp.shape[1], 1)\n kp_t = kp_rotated + t\n\n # add expression deviation \n exp = exp.view(exp.shape[0], -1, 3)\n kp_transformed = kp_t + exp\n\n return {'value': kp_transformed}\n\n\n\ndef make_animation(source_image, source_semantics, target_semantics,\n generator, kp_detector, he_estimator, mapping, \n yaw_c_seq=None, pitch_c_seq=None, roll_c_seq=None,\n use_exp=True, use_half=False, rank=0, p_num=1, bf16=False):\n print(f\"rank, p_num: {rank}, {p_num}\")\n with torch.no_grad():\n predictions = []\n import time\n import os\n #with torch.cpu.amp.autocast():\n start_time = time.time()\n kp_canonical = kp_detector(source_image)\n end_time = time.time()\n print(\"[kp_detector]:\")\n print(end_time - start_time)\n start_time = end_time\n he_source = mapping(source_semantics)\n end_time = time.time()\n print(\"[mapping]:\")\n print(end_time - start_time)\n start_time = end_time\n kp_source = keypoint_transformation(kp_canonical, he_source)\n end_time = time.time()\n print(end_time - start_time)\n for frame_idx in tqdm(range(target_semantics.shape[1]), 'Face Renderer:'):\n if frame_idx % p_num != rank:\n continue\n target_semantics_frame = target_semantics[:, frame_idx]\n he_driving = mapping(target_semantics_frame)\n if yaw_c_seq is not None:\n he_driving['yaw_in'] = yaw_c_seq[:, frame_idx]\n if pitch_c_seq is not None:\n he_driving['pitch_in'] = pitch_c_seq[:, frame_idx]\n if roll_c_seq is not None:\n he_driving['roll_in'] = roll_c_seq[:, frame_idx]\n kp_driving = keypoint_transformation(kp_canonical, he_driving)\n kp_norm = kp_driving\n with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16, cache_enabled=True) if bf16 else contextlib.nullcontext():\n out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)\n # print(f\"{rank}:{frame_idx}\")\n #print(out['prediction'])\n predictions.append(out['prediction'])\n folder_name = \"logs\"\n file_name = f'{p_num}_{rank}.npz'\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n file_path = os.path.join(folder_name, file_name)\n f = open(file_path, \"w\")\n np.savez(file_path, *predictions)\n f.close()\n # master process will be pending here to collect all the predictions\n # ... 
pending ...\n def atoi(text):\n return int(text) if text.isdigit() else text\n def natural_keys(text):\n return [ atoi(c) for c in re.split(r'(\\d+)', text) ]\n if rank == 0:\n while len(os.listdir(folder_name)) < p_num:\n time.sleep(0.2)\n # load all the npz arrays, merge by sequence\n #npz_file_paths = sorted(os.listdir(folder_name))\n npz_file_paths=os.listdir(folder_name)\n npz_file_paths.sort(key=natural_keys)\n print(\"start to merge...\")\n print(f\"npz_file_paths: {npz_file_paths}\")\n else:\n # exit(0)\n return None\n aggregated_lst = []\n for npz_file_path in npz_file_paths:\n npz_file = np.load(os.path.join(folder_name, npz_file_path))\n aggregated_lst.append([npz_file[i] for i in npz_file.files])\n #aggregated_predictions = [torch.from_numpy(x) for y in zip(*aggregated_lst) for x in y]\n # agg lst elements may have different length!\n #exit(0)\n padded_preds = [x for y in itertools.zip_longest(*aggregated_lst) for x in y]\n print(\"padded preds length:\")\n print(len(padded_preds))\n aggregated_predictions = [torch.from_numpy(i) for i in padded_preds if i is not None]\n #predictions_ts = torch.stack(predictions, dim=1)\n\n predictions_ts = torch.stack(aggregated_predictions, dim=1)\n return predictions_ts\n\nclass AnimateModel(torch.nn.Module):\n \"\"\"\n Merge all generator related updates into single model for better multi-gpu usage\n \"\"\"\n\n def __init__(self, generator, kp_extractor, mapping):\n super(AnimateModel, self).__init__()\n self.kp_extractor = kp_extractor\n self.generator = generator\n self.mapping = mapping\n\n self.kp_extractor.eval()\n self.generator.eval()\n self.mapping.eval()\n\n def forward(self, x):\n \n source_image = x['source_image']\n source_semantics = x['source_semantics']\n target_semantics = x['target_semantics']\n yaw_c_seq = x['yaw_c_seq']\n pitch_c_seq = x['pitch_c_seq']\n roll_c_seq = x['roll_c_seq']\n\n predictions_video = make_animation(source_image, source_semantics, target_semantics,\n self.generator, self.kp_extractor,\n self.mapping, use_exp = True,\n yaw_c_seq=yaw_c_seq, pitch_c_seq=pitch_c_seq, roll_c_seq=roll_c_seq)\n \n return predictions_video\n","repo_name":"Spycsh/xtalker","sub_path":"src/facerender/modules/make_animation.py","file_name":"make_animation.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"94"} +{"seq_id":"10312690500","text":"import time\nfrom socket import *\n\nclient = []\n\ns = socket(AF_INET, SOCK_DGRAM)\ns.bind(('', 2500))\n\nprint(\"Server started\")\n\nwhile True : \n data , addr = s.recvfrom(1024)\n if 'quit' in data.decode():\n if addr in client:\n print(addr, 'exited')\n client.remove(addr)\n continue\n \n if addr not in client:\n print(\"new client\", addr)\n client.append(addr)\n \n print(time.asctime() + str(addr) + \":\" + data.decode())\n\n for c in client:\n if c !=addr:\n s.sendto(data, c)\n\n ","repo_name":"jjimini98/Network-Programming","sub_path":"Week9/practice/udp_multi_chat_server.py","file_name":"udp_multi_chat_server.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21266166113","text":"import csv\nimport pandas as pd\nimport math\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nbatters = []\npitchers = []\npeople = []\nsalaries = []\n\ntrainingList = []\ntestList = []\n\nwith open(\"../data/core/Salaries.csv\") as 
csvDataFile:\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n salaries.append([row[0], row[1], row[2], row[3], row[4]])\nwith open(\"../data/core/People.csv\") as csvDataFile1:\n csvReader1 = csv.reader(csvDataFile1)\n for row1 in csvReader1:\n #id, birth year, month, day, first name, last name\n people.append([row1[0], row1[1], row1[2], row1[3], row1[13], row1[14]])\nwith open(\"../data/core/NewBatting.csv\") as csvDataFile2:\n csvReader2 = csv.reader(csvDataFile2)\n for row in csvReader2:\n avg = 0.0\n obp = 0.0\n slg = 0.0\n\n if float(row[6]) != 0.0:\n avg = float(row[8]) / float(row[6])\n slg = (float(row[8]) + (2 * float(row[9])) + (3 * float(row[10])) + (4 * float(row[11]))) / float(row[6])\n\n if (float(row[6]) + float(row[15]) + float(row[17]) + float(row[18]) + float(row[20])) != 0.0:\n obp = (float(row[8]) + float(row[15]) + float(row[17]) + float(row[18])) / (float(row[6]) + float(row[15]) + float(row[17]) + float(row[18]) + float(row[20]))\n \n batters.append([row[0], row[1], row[3], str(avg), str(obp), str(slg), row[11], row[13]])\n\nwith open(\"../data/core/Pitching.csv\") as csvDataFile3:\n csvReader3 = csv.reader(csvDataFile3)\n for row3 in csvReader3:\n # id, year, w, l, g, gs, cg, sho, sv, ip, h, er, hr,\n # bb, so, oppavg, era, ibb, wp, hbp, bk, bfp, gf, r\n # sh, sf, gidp\n pitchers.append([row3[0], row3[1], row3[5], row3[6], row3[7], row3[8], row3[9], row3[10], row3[11], row3[12], row3[13],\n row3[14], row3[15], row3[16], row3[17], row3[18], row3[19], row3[20], row3[21], row3[22], row3[23], row3[24], row3[25], \n row3[26], row3[27], row3[28], row3[29]])\n # if tempPerson3[0] == tempPerson[3] and tempPerson3[1] == tempPerson[0]:\n # tempPerson.append([row3[5], row3[6], row3[7], row3[8], row3[9], row3[10], row3[11], row3[12], row3[13],\n # row3[14], row3[15], row3[16], row3[17], row3[18], row3[19], row3[20], row3[21], row3[22], row3[23], row3[24], row3[25], \n # row3[26], row3[27], row3[28], row3[29]])\n\nloopCounter = 0\n\ntrainArr = []\ntestArr = []\nfor player in salaries:\n print(loopCounter)\n \n for batter in batters: \n if player[3] == batter[0] and player[0] == batter[1] and batter[2] == player[1]:\n if len(player) <= 6:\n player.append(batter[3])\n player.append(batter[4])\n player.append(batter[5])\n player.append(batter[6])\n player.append(batter[7])\n\n time = 2017 - int(player[0])\n temp = math.pow(1.03, time)\n adjustedSalary = int(player[4]) * temp\n player.append(adjustedSalary)\n\n if adjustedSalary < 1000000:\n player.append(1) # minimum\n\n elif adjustedSalary < 5000000 and adjustedSalary >= 1000000:\n player.append(2) # very low \n\n elif adjustedSalary < 10000000 and adjustedSalary >= 5000000:\n player.append(3) # low\n \n elif adjustedSalary < 15000000 and adjustedSalary >= 10000000:\n player.append(4) # average\n\n elif adjustedSalary < 20000000 and adjustedSalary >= 15000000:\n player.append(5) # high\n\n elif adjustedSalary < 25000000 and adjustedSalary >= 20000000:\n player.append(6) # very high\n\n elif adjustedSalary < 300000000 and adjustedSalary >= 25000000:\n player.append(7) # mvp\n\n else:\n player.append(-1) # no data\n\n for person in people:\n if player[3] == person[0]:\n age = int(player[0]) - int(person[1])\n player.append(age)\n\n trainingList.append(player) \n\n loopCounter += 1\n print(loopCounter)\n\n# for pitcher in pitchers:\n# if player[3] == pitcher[0] and player[0] == pitcher[1]:\n# del pitcher[0:1]\n# player.append(pitcher)\n\ntrainingDF = pd.DataFrame(trainingList, columns=['Year', 'Team', 'League', 'Player 
ID', 'Salary', 'Average', 'On Base Percentage', 'Slugging Percentage', 'Home Runs', 'Stolen Bases', 'Adjusted Salary', 'Category', 'Age'])\n# print(trainingDF)\n\n# testDF = pd.DataFrame(testList, columns=['Year', 'Team', 'League', 'Player ID', 'Salary', 'Average', 'On Base Percentage', 'Slugging Percentage', 'Adjusted Salary', 'Category', 'Age'])\n# print(testDF)\n\ndef clean_dataset(df):\n assert isinstance(df, pd.DataFrame), \"df needs to be a pd.DataFrame\"\n df.dropna(inplace=True)\n indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(1)\n return df[indices_to_keep].astype(np.float64)\n\n#trainingDF = pd.get_dummies(trainingDF, columns=['Year', 'Team', 'League', 'Player ID'])\n\n# Fill in NaN values\ntrainingDF.fillna(0, inplace=True)\n\n# trainingDF = clean_dataset(trainingDF)\n\n# print(trainingDF.head())\n\n\n# https://www.datacamp.com/community/tutorials/decision-tree-classification-python\n# https://scikit-learn.org/stable/modules/tree.html#classification\nfeatures = ['Average']\nx = trainingDF[features]\ny = trainingDF.Category\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(x_train,y_train)\ntree.plot_tree(clf)\ny_pred = clf.predict(x_test)\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\n\n# clf = tree.DecisionTreeClassifier(max_depth=10)\n# clf = clf.fit(trainArr, y)\n\n\n\n","repo_name":"tetreaum/SeniorProject","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1435919066","text":"import numpy as np\nimport sys\nimport copy\nimport time\nnp.set_printoptions(threshold=float('inf'), linewidth=1000, suppress=True, precision=5)\n\n\ndef print_row(*strings, width=[11, 16, 16, 16, 16, 16, 40], header=False):\n to_print = '|'\n for i, s in enumerate(strings[:-1]):\n to_print += s.center(width[i])\n to_print += ' | '\n to_print += strings[-1].center(width[-1]) + '|'\n if header:\n print('-' * len(to_print))\n print(to_print)\n if header:\n print('-' * len(to_print))\n\n\nclass Optimizer():\n\n def __init__(self, dim, function, constraints, getoptinfo, name, max_iter, ftol, xtol):\n self.dim = dim\n self.function = function\n self.constraints = constraints\n self.getoptinfo = getoptinfo\n self.it = 1\n self.max_iter = max_iter\n self.ftol = ftol\n self.xtol = xtol\n self.name = name\n\n def step(self, x, fx):\n raise NotImplementedError\n\n def stop_criteria(self):\n raise NotImplementedError\n\n def test_constraints(self, x): # Returns False if a constraint is violated, True otherwise\n return self.constraints.test(x)\n\n def optimize(self, x0, verbose=False):\n self.x = copy.deepcopy(x0)\n self.fx = self.function(self.x)\n self.it = 1\n info = self.getoptinfo(self.x, self.fx)\n self.track = [(self.x, self.fx, None, None, None, info[0], info[1])]\n self.verbose = verbose\n if self.verbose:\n print('\\n%s\\n' % (' {} Optimization Starting '.format(self.name).center(135, '•')))\n print_row('Iteration', '||xk-x*||', '|f(xk)-f(x*)|', '||xk-xk-1||', '|f(xk)-f(xk-1)|', 'f(x)', 'x', header=True)\n print_row('0', '%3.5f' % info[0], '%3.5f' % info[1], 'nan', 'nan', '%3.5f' % self.fx, '%s' % str(self.x.reshape(-1)))\n while True:\n start = time.time()\n self.x_next, self.fx_next = self.step(self.x, self.fx)\n end = time.time()\n self.xdiff = np.linalg.norm(self.x_next - self.x)\n self.fdiff = abs(self.fx_next - self.fx)\n info = 
self.getoptinfo(self.x_next, self.fx_next)\n\n finish, reason = self.stop_criteria()\n if self.verbose and not (reason is not None and \"constraints violated\" in reason):\n print_row('%d' % self.it, '%3.5f' % info[0], '%3.5f' % info[1], '%3.5f' % self.xdiff, '%3.5f' % self.fdiff, '%3.5f' % self.fx_next, '%s' % str(self.x_next.reshape(-1)))\n if finish:\n if \"constraints violated\" not in reason:\n self.track.append((self.x_next, self.fx_next, end - start, self.xdiff, self.fdiff, info[0], info[1]))\n self.x, self.fx = self.x_next, self.fx_next\n break\n else:\n self.track.append((self.x_next, self.fx_next, end - start, self.xdiff, self.fdiff, info[0], info[1]))\n self.x, self.fx = self.x_next, self.fx_next\n self.it += 1\n\n if self.verbose:\n reason = reason.upper().center(len(reason) + 4, ' ').center(151, '>')\n reason = reason[:int(len(reason) / 2)] + reason[int(len(reason) / 2):].replace('>', '<')\n\n print('%s\\n' % reason)\n\n print('\\n%s\\n' % ('Final Results After {:4d} Iterations'.format(self.it).center(151, '-')))\n print_row('||xk-x*||', '|f(xk)-f(x*)|', 'f(x)', 'x', width=[16, 16, 16, 40], header=True)\n print_row('%3.5f' % self.track[-1][-2], '%3.5f' % self.track[-1][-1], '%3.5f' % self.track[-1][1], '%s' % str(self.track[-1][0].reshape(-1)), width=[16, 16, 16, 40])\n print('-' * 99)\n return self.track","repo_name":"amoutonnet/IEOR262BFinalProject","sub_path":"optimizers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"32188155091","text":"import numpy as np\nimport tensorflow as tf\nimport cv2\n\nclass Colorizer:\n \"\"\"A wrapper for a colorizer trained as a Keras model\n which pretends to predict images one by one (batch_size = 1)\n without a fixed size (target_size = None)\n \"\"\"\n\n def __init__(self, model_path, mode='lab'):\n self._model_path = model_path\n print(model_path)\n self._model = tf.keras.models.load_model(self._model_path)\n if mode not in ('lab', 'rgb'):\n raise ValueError(\"mode must be 'lab' or 'rgb'.\")\n self._mode = mode.lower()\n \n def predict(self, img):\n if img.dtype != np.float32: # rescale integer images to [0, 1]\n img = (img*1/255).astype(np.float32)\n \n img = to_gray(img)\n\n if len(img.shape) < 4:\n img = np.array([img])\n pred = self._model.predict(img, batch_size=1, verbose=0, workers=1)\n pred = np.array([cv2.resize(pred[0], (img.shape[2], img.shape[1]))])\n if self._mode == 'lab':\n print(pred.shape, img.shape)\n pred = cv2.cvtColor((np.concatenate([img[0], pred[0]], axis=-1)*255).astype(np.uint8), cv2.COLOR_LAB2BGR).astype(np.uint8)\n return pred\n\ndef to_gray(x):\n return np.expand_dims(np.average(x, axis=-1, weights=[0.3, 0.59, 0.11]), axis=-1)","repo_name":"tamlc46/automatic-image-colorization","sub_path":"libs/Colorizer.py","file_name":"Colorizer.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"19382759918","text":"a='Lev'\r\nb='Lev am 20 years old'\r\nc='Victor'\r\nd='Victor is 65 years old'\r\ne='Lev and Victor together are'\r\nf='years old'\r\n\r\nprint(len(a+b))\r\nprint(e,'85',f)\r\nprint(e,85,f)\r\n\r\ng='20'\r\nh='65'\r\nprint(g+h)\r\nprint(int(g)+int(h))\r\n\r\n","repo_name":"victenna/LEV","sub_path":"lev7(string ).py","file_name":"lev7(string 
).py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35158372463","text":"import numpy as np\n\n\nclass TSPBranchAndBoundary:\n class Tree:\n def __init__(self, prev, left, right, matrix, vertex, phi, is_taken, vertices):\n self.prev = prev\n self.left = left\n self.right = right\n self.matrix = matrix\n self.vertex = vertex\n self.phi = phi\n self.is_taken = is_taken\n self.vertices = vertices\n\n def __init__(self, array):\n for i in range(len(array)):\n for j in range(len(array)):\n if array[i][j] == -1:\n array[i][j] = float(\"inf\")\n\n self.solution = self.Tree(\n None,\n None,\n None,\n np.array(array),\n None,\n 0,\n True,\n [range(len(array)), range(len(array))]\n )\n\n self.graph_size = len(array[0])\n self.path_cost = sum(array[i][i + 1] for i in range(len(array) - 1)) + array[len(array) - 1][0]\n self.saved_vertex = self.solution\n self.max_fine = 0\n self.max_fine_item = (0, 0)\n\n def solve(self):\n self.split_plurality(self.solution)\n\n temp = [-1] * self.graph_size\n while self.saved_vertex.prev:\n if self.saved_vertex.is_taken:\n temp[self.saved_vertex.vertex[0]] = self.saved_vertex.vertex[1]\n self.saved_vertex = self.saved_vertex.prev\n\n answer = []\n n = 0\n while self.graph_size:\n answer.append(n)\n n = temp[n]\n self.graph_size -= 1\n\n return answer, self.path_cost\n\n def split_plurality(self, solution):\n if not solution:\n return\n\n if solution.matrix.shape[0] > 2:\n solution.matrix, solution.phi = self.matrix_reduction(solution.matrix, solution.phi)\n\n self.zero_fines(solution.matrix)\n\n vertex_to_go = (solution.vertices[0][self.max_fine_item[0]], solution.vertices[1][self.max_fine_item[1]])\n\n if not solution.phi + self.max_fine == float(\"inf\") and solution.phi + self.max_fine < self.path_cost:\n temp_matrix = solution.matrix\n if solution.vertex:\n temp_matrix[solution.vertices[0].index(vertex_to_go[0]), solution.vertices[1].index(vertex_to_go[1])] = float(\"inf\")\n\n solution.right = self.Tree(\n solution,\n None,\n None,\n temp_matrix,\n vertex_to_go,\n solution.phi + self.max_fine,\n False,\n solution.vertices\n )\n\n solution.matrix = np.delete(solution.matrix, self.max_fine_item[0], 0)\n solution.matrix = np.delete(solution.matrix, self.max_fine_item[1], 1)\n\n solution.vertices = [\n [x for index, x in enumerate(solution.vertices[0]) if index != self.max_fine_item[0]],\n [x for index, x in enumerate(solution.vertices[1]) if index != self.max_fine_item[1]]\n ]\n\n solution.matrix = self.prevent_cycles(solution.matrix, vertex_to_go, solution.vertices)\n solution.matrix, solution.phi = self.matrix_reduction(solution.matrix, solution.phi)\n\n solution.left = self.Tree(\n solution,\n None,\n None,\n solution.matrix,\n vertex_to_go,\n solution.phi,\n True,\n solution.vertices\n )\n\n self.split_plurality(solution.left)\n\n self.split_plurality(solution.right)\n\n if solution.matrix.shape[0] == 2:\n solution.left = self.Tree(\n solution,\n None,\n None,\n solution.matrix,\n (solution.vertices[0][0], solution.vertices[1][np.where(solution.matrix[0, :] != float(\"inf\"))[0][0]]),\n solution.phi + min(solution.matrix[0, :]),\n True,\n solution.vertices\n )\n solution.left.left = self.Tree(\n solution.left,\n None,\n None,\n solution.vertices[1][np.where(solution.matrix[1, :] != float(\"inf\"))[0][0]],\n (solution.vertices[0][1], solution.vertices[1][np.where(solution.matrix[1, :] != float(\"inf\"))[0][0]]),\n solution.phi + min(solution.matrix[1, :]),\n 
True,\n solution.vertices\n )\n\n solution = solution.left.left\n\n if solution.phi < self.path_cost:\n self.path_cost = solution.phi\n self.saved_vertex = solution\n\n @staticmethod\n def prevent_cycles(matrix, vertex_to_go, vertices):\n if vertex_to_go[1] in vertices[0] and vertex_to_go[0] in vertices[1]:\n matrix[vertices[0].index(vertex_to_go[1]), vertices[1].index(vertex_to_go[0])] = float(\"inf\")\n else:\n row_index = 0\n column_index = 0\n\n for index, column in enumerate(matrix.transpose()):\n if max(column) < float(\"inf\"):\n column_index = index\n break\n\n for index, row in enumerate(matrix):\n if max(row) < float(\"inf\"):\n row_index = index\n break\n\n matrix[row_index][column_index] = float(\"inf\")\n\n return matrix\n\n @staticmethod\n def matrix_reduction(matrix, phi):\n phi += sum(matrix.min(axis=0))\n matrix = matrix - matrix.min(axis=0)\n phi += sum(matrix.min(axis=1))\n matrix = matrix - matrix.min(axis=1)[:, np.newaxis]\n\n return matrix, phi\n\n def zero_fines(self, matrix):\n self.max_fine = 0\n self.max_fine_item = (0, 0)\n\n for row_index, row in enumerate(matrix):\n for index, item in enumerate(row):\n temp_min = 0\n if item == 0:\n temp = matrix[row_index, index]\n matrix[row_index, index] = float(\"inf\")\n temp_min += min(row) + min(matrix[:, index])\n matrix[row_index, index] = temp\n\n if temp_min > self.max_fine:\n self.max_fine = temp_min\n self.max_fine_item = (row_index, index)\n","repo_name":"dawnhell/algos-labs-2018","sub_path":"TSP/TSP_branch_and_boundary.py","file_name":"TSP_branch_and_boundary.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"29798593245","text":"import re\r\nimport json\r\n\r\n# Read the WhatsApp conversation file\r\nwith open('conversation3.txt', 'r', encoding='utf-8') as file:\r\n chat_data = file.readlines()\r\n\r\nchat_entries = []\r\n\r\n# Define a list to store the chat data\r\nchat_list = {\r\n \"questions\": []\r\n}\r\n\r\ncurrent_question = None\r\n\r\nfor line in chat_data:\r\n # Use regular expressions to extract sender and message content\r\n match = re.match(r'(\\w+): (.*)', line)\r\n if match:\r\n sender, message = match.groups()\r\n\r\n # If the current message is a question, create a new question entry\r\n if current_question is None:\r\n current_question = {\"question\": message, \"answer\": None}\r\n else:\r\n # If the current message is an answer, add it to the current question\r\n current_question[\"answer\"] = message\r\n chat_list[\"questions\"].append(current_question)\r\n current_question = None\r\n\r\n# Save the data to chat.json\r\nwith open('chat.json', 'w') as json_file:\r\n json.dump(chat_list, json_file, indent=2)\r\n","repo_name":"aRPIT0313/chatbot","sub_path":"read_wp.py","file_name":"read_wp.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8529562176","text":"import cv2\nimport os\nimport numpy as np\n\n\ndef displayImg(imgNormWhite, mask3, img_mask, filenameImg, label, label_subTypes, dirFrames):\n\n # font\n font = cv2.FONT_HERSHEY_SIMPLEX\n # org\n org1 = (50, 50)\n org2 = (50, 100)\n org3 = (50, 150)\n # fontScale\n fontScale = 1\n # Blue color in BGR\n color = (255, 0, 0)\n # Line thickness of 2 px\n thickness = 2\n titleStr = 'Result (press ANY key to continue)'\n textResult1 = filenameImg\n textResult2 = 'Label: {0} (Label w. 
subtypes: {1})'.format(label.lower(), label_subTypes.lower())\n # textResult3 = 'Output: {0}'.format(output.lower())\n final_frame = cv2.hconcat((imgNormWhite, mask3, img_mask))\n cv2.putText(final_frame, textResult1, org1, font, fontScale, color, thickness, cv2.LINE_AA)\n cv2.putText(final_frame, textResult2, org2, font, fontScale, color, thickness, cv2.LINE_AA)\n # cv2.putText(final_frame, textResult3, org3, font, fontScale, color, thickness, cv2.LINE_AA)\n cv2.namedWindow(titleStr)\n cv2.setWindowProperty(titleStr, cv2.WND_PROP_TOPMOST, 1)\n cv2.moveWindow(titleStr, 100, 100)\n cv2.imshow(titleStr, final_frame)\n cv2.imwrite(os.path.join(dirFrames, filenameImg), final_frame)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef updateImg(img, output, output_subTypes, filenameImg, dirFrames):\n\n img = np.uint8(img)\n\n # font\n font = cv2.FONT_HERSHEY_SIMPLEX\n # org\n org3 = (50, 150)\n # fontScale\n fontScale = 1\n # Blue color in BGR\n color = (255, 0, 0)\n # Line thickness of 2 px\n thickness = 2\n titleStr = 'Result (press ANY key to continue)'\n textResult3 = 'Output: {0} (Output w. subtypes: {1})'.format(output.lower(), output_subTypes.lower())\n cv2.putText(img, textResult3, org3, font, fontScale, color, thickness, cv2.LINE_AA)\n cv2.imwrite(os.path.join(dirFrames, filenameImg), img)\n\n","repo_name":"AngeloUNIMI/AVB","sub_path":"functions/displayImg.py","file_name":"displayImg.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"70189316791","text":"import boto3\nimport json\nfrom datetime import datetime\n\nwith open(\"../credentials/aws_credentials.json\") as credfile:\n cred = json.load(credfile)\n access_key = cred[\"access_key\"]\n secret_key = cred[\"secret_key\"]\n\nsns = boto3.client(\n \"sns\",\n region_name=\"us-east-1\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key\n)\n\nreload_success = sns.create_topic(Name=\"reload-success\") # Change the topic name if you want\n\n# You can subscribe your email address thru AWS Console too\n# sub = sns.subscribe(\n# TopicArn=reload_success[\"TopicArn\"],\n# Protocol=\"email\",\n# Endpoint=\"user@email.com\"\n# )\n\n\ndef send_success_message():\n sns.publish(\n TopicArn=reload_success[\"TopicArn\"],\n Message=f\"\"\"The spreadsheet Python.org Jobs has been successfully reloaded.\n Update time: {datetime.now()}\"\"\",\n Subject=\"Reload successful - Python.org Jobs\"\n )\n\n\ndef send_failure_message(old_state, new_state):\n sns.publish(\n TopicArn=reload_success[\"TopicArn\"],\n Message=f\"\"\"There is a problem on the pipeline for the Python.org Jobs spreadsheet.\n Update time: {datetime.now()}\n Task: {old_state}\n Error: {new_state}\"\"\",\n Subject=\"Reload FAILED - Python.org Jobs\"\n )\n quit(1)\n","repo_name":"umitkaanusta/smol-elt","sub_path":"smol/sns_utils.py","file_name":"sns_utils.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"31308086746","text":"def is_palindrome_recursive(word):\n \"\"\"\n This function identifies whether a word is a palindrome recursively.\n\n Parameters\n ----------\n word : str\n Word we wish to check\n \n Returns\n -------\n boolean\n True if input is a palindrome, False otherwise\n \n \"\"\"\n if type(word) is int:\n word = str(word)\n haha = [i for i in word]\n print(haha)\n\n if len(haha) > 1:\n if haha[0] == haha[-1]:\n haha.pop(0)\n haha.pop(-1)\n return 
is_palindrome_recursive(haha)\n if haha[0] != haha[-1]:\n return False\n\n return True\n\nprint(is_palindrome_recursive(1211))\n\n \n","repo_name":"mariaeduardatc/cs110","sub_path":"tests pcw/palindrome_rec.py","file_name":"palindrome_rec.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1687595547","text":"import numpy as np\r\nimport pretty_midi\r\nimport glob\r\n\r\n#This file reads in the midi file converts it to a text file of a monophonic version\r\n#The highest note currently being played is what is written into the encoded file\r\n\r\n\r\ndef get_piano_roll(midifile):\r\n\tmidi_pretty_format = pretty_midi.PrettyMIDI(midifile)\r\n\tpiano_midi = midi_pretty_format.instruments[0] # Get the piano channels\r\n\tpiano_roll = piano_midi.get_piano_roll(fs=20)\r\n\treturn piano_roll\r\n\r\n#different to encode in other scripts due to breaking from loop once first (highest note) is found\r\ndef encode(arr):\r\n timeinc=0\r\n outString=\"\"\r\n\r\n for time in arr:\r\n i=0\r\n max=0\r\n for t in np.flip(time): #flips column of array so you are starting from note 127\r\n if t >0:\r\n max=t #first value found that isn't 0 is the max for that column\r\n #max also tells us the velocity at that note\r\n break\r\n i+=1 #how far you had to move from 127\r\n if i==128: # looped the whole way through 128-0 and nothing found so nothing is being played\r\n temp=\"#\"\r\n else:\r\n i=127-i # getting the actual note being played by taking steps from 127 away from 127\r\n temp=\"(\"+str(i)+\",\"+str(max)+\")\" #writing combined encoding for note at this time step\r\n print(temp)\r\n file1.write(temp+\"\\n\")\r\n return outString\r\n\r\nfile1 = open(\"Schubert_-_Allegretto_in_C_Minor_D.915_Mono.txt\", \"a\") #file to write monophonic version to\r\npr = get_piano_roll(\"Schubert_-_Allegretto_in_C_Minor_D.915.mid\") #reading in the polyphonic version of the song\r\narr = pr.T\r\nencode(arr)\r\nfile1.close()","repo_name":"brennan2602/FYP","sub_path":"splitPoly.py","file_name":"splitPoly.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"72652273589","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2012-09-24\n\n@author: Sergey <me@seriyps.ru>\n\nHow to test:\n\n$ head -c 100M /dev/urandom > static_file.bin\n$ python\n>>> from urllib2 import urlopen\n>>>\n>>> m100 = 104857600\n>>> host = \"http://localhost:8888\"\n>>>\n>>> assert len(urlopen(host + \"/big_file.bin\").read()) == m100\n>>> assert len(urlopen(host + \"/big_stream.bin\").read()) == m100\n>>> assert len(urlopen(host + \"/big_stream_gen.bin\").read()) == m100\n>>>\n>>> assert len(urlopen(host + \"/big_file.bin\").read(1024)) == 1024\n>>> assert len(urlopen(host + \"/big_stream.bin\").read(1024)) == 1024\n>>> assert len(urlopen(host + \"/big_stream_gen.bin\").read(1024)) == 1024\n\nThis code make tornado consume memory and CPU:\n\n>>> [urlopen(host + \"/big_file.bin\").read(1) for _ in xrange(100)]\n\n'''\nimport tornado.ioloop\nimport tornado.web\nimport tornado.gen\nimport os\n\n\nclass FileStreamerHandler(tornado.web.RequestHandler):\n \"\"\"Standard static file handler loads whole file in memory. 
This one\n read file chunk-by-chunk.\"\"\"\n\n CHUNK_SIZE = 512000 # 0.5 MB\n\n def initialize(self, file_path):\n self.path = file_path\n\n @tornado.web.asynchronous\n def get(self):\n self.set_header(\"Content-Type\", \"text/plain\")\n self.set_header(\"Content-Length\", os.path.getsize(self.path))\n self._fd = open(self.path, \"rb\")\n self.flush()\n self.write_more()\n\n def write_more(self):\n data = self._fd.read(self.CHUNK_SIZE)\n if not data:\n self.finish()\n self._fd.close()\n return\n self.write(data)\n self.flush(callback=self.write_more)\n\n\nclass GenFileStreamerHandler(tornado.web.RequestHandler):\n \"\"\"Standard static file handler loads whole file in memory. This one\n read file chunk-by-chunk.\"\"\"\n\n CHUNK_SIZE = 512000 # 0.5 MB\n\n def initialize(self, file_path):\n self.path = file_path\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self):\n self.set_header(\"Content-Type\", \"text/plain\")\n self.set_header(\"Content-Length\", os.path.getsize(self.path))\n self.flush()\n # \"with\" statement didn't work properly in this context (didn't release\n # resources when socket inproperly closed by client)\n # with open(self.path, \"rb\") as fd:\n # data = fd.read(self.CHUNK_SIZE)\n # while data:\n # self.write(data)\n # yield tornado.gen.Task(self.flush)\n # data = fd.read(self.CHUNK_SIZE)\n fd = open(self.path, \"rb\")\n data = fd.read(self.CHUNK_SIZE)\n while data:\n self.write(data)\n yield tornado.gen.Task(self.flush)\n data = fd.read(self.CHUNK_SIZE)\n fd.close()\n self.finish()\n\n\napplication = tornado.web.Application([\n (r\"/(big_file.bin)\", tornado.web.StaticFileHandler,\n {\"path\": \".\"}),\n (r\"/big_stream.bin\", FileStreamerHandler,\n {\"file_path\": \"big_file.bin\"}),\n (r\"/big_stream_gen.bin\", GenFileStreamerHandler,\n {\"file_path\": \"big_file.bin\"}),\n])\n\nif __name__ == \"__main__\":\n application.listen(8888)\n tornado.ioloop.IOLoop.instance().start()","repo_name":"gistable/gistable","sub_path":"dockerized-gists/3773703/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"} +{"seq_id":"6323700178","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2021/1/1 下午9:19\n@file: test_selective_kernel_conv2d.py\n@author: zj\n@description: \n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom resnest.torch.splat import SplAtConv2d\n\nfrom zcls.model.layers.split_attention_conv2d import SplitAttentionConv2d\n\n\ndef init_weights(modules):\n for m in modules:\n if isinstance(m, nn.Conv2d):\n nn.init.constant_(m.weight, 0.01)\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n # nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.weight, 0.01)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n\n\ndef get_custom_res():\n # 保证训练时获取的随机数都是一样的\n init_seed = 1\n torch.manual_seed(init_seed)\n torch.cuda.manual_seed(init_seed)\n np.random.seed(init_seed)\n\n num = 3\n in_channels = 64\n out_channels = 128\n data = torch.randn(num, in_channels, 56, 56)\n\n # custom\n model = SplitAttentionConv2d(in_channels,\n out_channels,\n radix=2,\n groups=32,\n reduction_rate=16\n )\n init_weights(model.modules())\n print(model)\n outputs_c = model(data)\n print(outputs_c.shape)\n\n return 
outputs_c\n\n\ndef get_official_res():\n # 保证训练时获取的随机数都是一样的\n init_seed = 1\n torch.manual_seed(init_seed)\n torch.cuda.manual_seed(init_seed)\n np.random.seed(init_seed)\n\n num = 3\n in_channels = 64\n out_channels = 128\n data = torch.randn(num, in_channels, 56, 56)\n\n # official\n model = SplAtConv2d(in_channels,\n out_channels,\n 3,\n norm_layer=nn.BatchNorm2d,\n bias=False,\n padding=1,\n radix=2,\n groups=32,\n reduction_factor=16\n )\n init_weights(model.modules())\n print(model)\n outputs_o = model(data)\n print(outputs_o.shape)\n\n return outputs_o\n\n\ndef compare():\n outputs_c = get_custom_res()\n outputs_o = get_official_res()\n\n res = torch.allclose(outputs_c, outputs_o)\n print(res)\n\n\ndef test_split_attention_conv2d():\n num = 3\n in_channels = 64\n out_channels = 128\n data = torch.randn(num, in_channels, 56, 56)\n\n # 不进行分组\n model = SplitAttentionConv2d(in_channels,\n out_channels,\n radix=2,\n groups=1,\n reduction_rate=4\n )\n print(model)\n outputs = model(data)\n print(outputs.shape)\n\n assert outputs.shape == (num, out_channels, 56, 56)\n\n # 不进行radix\n model = SplitAttentionConv2d(in_channels,\n out_channels,\n radix=1,\n groups=1,\n reduction_rate=16\n )\n print(model)\n outputs = model(data)\n print(outputs.shape)\n\n assert outputs.shape == (num, out_channels, 56, 56)\n\n # 同时实现radix和group\n model = SplitAttentionConv2d(in_channels,\n out_channels,\n radix=2,\n groups=32,\n reduction_rate=16\n )\n print(model)\n outputs = model(data)\n print(outputs.shape)\n\n assert outputs.shape == (num, out_channels, 56, 56)\n\n\nif __name__ == '__main__':\n compare()\n test_split_attention_conv2d()\n","repo_name":"ZJCV/ZCls","sub_path":"tests/test_model/test_layer/test_split_attention_conv2d.py","file_name":"test_split_attention_conv2d.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"94"} +{"seq_id":"20325594158","text":"import ports\nimport os\nfrom flask import Flask, render_template, request, url_for, abort, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nimport waitress\n\n#if PORT env is specified, use that, otherwise use port 5000 flask default\nPORT = os.environ.get('PORT', 5000)\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ports.sqlite'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nports.db.init_app(app)\n\n@app.route('/index')\n@app.route('/', methods=['GET'])\ndef entry():\n return render_template('input_page.html', result=False)\n\n@app.route('/', methods=['POST'])\ndef submit(): \n #mysterious, but necessary step. 
Without next line, request is null.\n request.get_data()\n userinput = request.form['port_input']\n return redirect(url_for('port_response', submitted_port=userinput), 303)\n\n@app.route('/port/<int:submitted_port>', methods=['GET'])\ndef port_response(submitted_port):\n\n if submitted_port == '':\n return render_template('input_page.html', result=False)\n \n try:\n portInt = int(submitted_port)\n except ValueError:\n abort(400)\n\n search_result = ports.search_port(portInt)\n if search_result != None:\n success = True\n result = search_result\n else:\n success = False\n result = \"No service found!\"\n\n return render_template('input_page.html', success=success, port=portInt, result=result)\n\n\n@app.route ('/healthcheck')\ndef healthcheck():\n return 'ok'\n\nif __name__ == \"__main__\":\n waitress.serve(app, host='0.0.0.0', port=PORT)\n","repo_name":"karldreher/KnowYourPorts","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"37269137453","text":"import random\n\nimport numpy as np\nfrom tqdm import tqdm\n\nclass Cha:\n def __init__(self, atk, hp, ap):\n self.atk = atk\n self.hp = hp\n self.ap = ap\n\n def dice(self):\n return random.randint(1, 100) + self.atk + int(np.ceil(self.ap//20))\n\n def hurt(self, atk):\n if self.ap > 0:\n self.ap -= atk\n if self.ap < 0:\n self.ap = 0\n else:\n self.hp -= 1\n\n def check(self):\n return self.hp <= 0\n\ndef fight(cha1:Cha, cha2:Cha):\n while True:\n d1 = cha1.dice()\n d2 = cha2.dice()\n if (d1==d2):\n continue\n if d1>d2:\n cha2.hurt(d1)\n if cha2.check():\n return 1\n else:\n cha1.hurt(d2)\n if cha1.check():\n return 0\n\nwin = 0\nfor i in tqdm(range(1000000)):\n nana = Cha(85, 3, 0)\n luna = Cha(61, 1, 220)\n win += fight(nana, luna)\n\nprint(win / 10000, '%')\n\n","repo_name":"Xiaoyuan-xyz/XYZPytonTool","sub_path":"dice_simulator/old/easydice.py","file_name":"easydice.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23299020498","text":"# -*- encoding: utf-8 -*-\n# @Author: peng wei\n# @Time: 2021/7/21 上午11:05\n\nimport json\nfrom time import sleep\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.common.exceptions import NoSuchElementException\nfrom src.main.python.lib.alertBox import BeAlertBox\nfrom src.main.python.lib.pageMaskWait import page_wait\nfrom src.main.python.lib.positionPanel import getPanelXpath\nfrom src.main.python.core.app.VisualModeler.doctorWho import DoctorWho\nfrom src.main.python.lib.logger import log\nfrom src.main.python.lib.globals import gbl\n\n\nclass Database:\n\n def __init__(self):\n self.browser = gbl.service.get(\"browser\")\n DoctorWho().choose_menu(\"常用信息管理-数据库管理\")\n self.browser.switch_to.frame(\n self.browser.find_element(By.XPATH, \"//iframe[contains(@src, '/VisualModeler/html/commonInfo/dbCfg.html')]\"))\n sleep(1)\n\n def search(self, query, need_choose=False):\n \"\"\"\n :param query: 查询条件,字典\n :param need_choose: True/False\n \"\"\"\n if not isinstance(query, dict):\n raise TypeError(\"查询条件需要是字典格式\")\n log.info(\"查询条件: {0}\".format(json.dumps(query, ensure_ascii=False)))\n select_item = None\n\n # 数据库名称\n if query.__contains__(\"数据库名称\"):\n db_name = query.get(\"数据库名称\")\n self.browser.find_element(By.XPATH, 
\"//*[@id='db_name']/following-sibling::span/input[1]\").clear()\n self.browser.find_element(By.XPATH, \"//*[@id='db_name']/following-sibling::span/input[1]\").send_keys(db_name)\n select_item = db_name\n\n # 数据库URL\n if query.__contains__(\"数据库URL\"):\n db_url = query.get(\"数据库URL\")\n self.browser.find_element(By.XPATH, \"//*[@id='db_url']/following-sibling::span/input[1]\").clear()\n self.browser.find_element(By.XPATH, \"//*[@id='db_url']/following-sibling::span/input[1]\").send_keys(db_url)\n\n # 数据库驱动\n if query.__contains__(\"数据库驱动\"):\n db_driver = query.get(\"数据库驱动\")\n self.browser.find_element(By.XPATH, \"//*[@id='db_driver']/following-sibling::span//a\").click()\n panel_xpath = getPanelXpath()\n self.browser.find_element(\n By.XPATH, panel_xpath + \"//*[contains(@id,'db_driver') and text()='{0}']\".format(db_driver)).click()\n\n # 归属类型\n if query.__contains__(\"归属类型\"):\n belong_type = query.get(\"归属类型\")\n self.browser.find_element(By.XPATH, \"//*[@id='belong_type']/following-sibling::span//a\").click()\n panel_xpath = getPanelXpath()\n self.browser.find_element(\n By.XPATH, panel_xpath + \"//*[contains(@id,'belong_type') and text()='{0}']\".format(belong_type)).click()\n\n # 查询\n self.browser.find_element(By.XPATH, \"//*[@id='btn']\").click()\n page_wait()\n alert = BeAlertBox(timeout=1, back_iframe=False)\n if alert.exist_alert:\n msg = alert.get_msg()\n log.info(\"弹出框返回: {0}\".format(msg))\n gbl.temp.set(\"ResultMsg\", msg)\n return\n if need_choose:\n if select_item:\n try:\n self.browser.find_element(By.XPATH, \"//*[@field='dbName']/*[text()='{0}']\".format(select_item)).click()\n except NoSuchElementException:\n raise KeyError(\"未找到匹配数据\")\n log.info(\"选择: {0}\".format(select_item))\n else:\n raise KeyError(\"条件不足,无法选择数据\")\n\n def add(self, db_name, db_driver, db_url, username, pwd, belong_type, data_type):\n \"\"\"\n :param db_name: 数据库名称\n :param db_driver: 数据库驱动\n :param db_url: 数据库URL\n :param username: 用户名\n :param pwd: 密码\n :param belong_type: 归属类型\n :param data_type: 数据类型\n \"\"\"\n log.info(\"开始添加数据\")\n self.browser.find_element(By.XPATH, \"//*[@id='addBtn']\").click()\n wait = WebDriverWait(self.browser, 30)\n wait.until(ec.frame_to_be_available_and_switch_to_it((\n By.XPATH, \"//iframe[contains(@src,'dbCfgEdit.html?type=add')]\")))\n sleep(1)\n wait = WebDriverWait(self.browser, 30)\n wait.until(ec.element_to_be_clickable((By.XPATH, \"//*[@name='dbName']/preceding-sibling::input\")))\n\n self.database_page(db_name=db_name, db_driver=db_driver, db_url=db_url, username=username, pwd=pwd,\n belong_type=belong_type, data_type=data_type)\n # 提交\n self.browser.find_element(By.XPATH, \"//*[@id='submitBtn']\").click()\n alert = BeAlertBox()\n msg = alert.get_msg()\n if alert.title_contains(\"保存成功\"):\n log.info(\"数据 {0} 添加成功\".format(db_name))\n else:\n log.warning(\"数据 {0} 添加失败,失败提示: {1}\".format(db_name, msg))\n gbl.temp.set(\"ResultMsg\", msg)\n\n def update(self, db, db_name, db_driver, db_url, username, pwd, belong_type, data_type):\n \"\"\"\n :param db: 数据库名称\n :param db_name: 数据库名称\n :param db_driver: 数据库驱动\n :param db_url: 数据库URL\n :param username: 用户名\n :param pwd: 密码\n :param belong_type: 归属类型\n :param data_type: 数据类型\n \"\"\"\n log.info(\"开始修改数据\")\n self.search(query={\"数据库名称\": db}, need_choose=True)\n self.browser.find_element(By.XPATH, \"//*[@id='editBtn']\").click()\n\n # 鉴于数据权限问题,在修改/删除数据时,需要判断是否有弹出框提示无权操作\n alert = BeAlertBox(back_iframe=False, timeout=2)\n if alert.exist_alert:\n msg = alert.get_msg()\n gbl.temp.set(\"ResultMsg\", msg)\n else:\n 
wait = WebDriverWait(self.browser, 10)\n wait.until(ec.frame_to_be_available_and_switch_to_it((\n By.XPATH, \"//iframe[contains(@src,'dbCfgEdit.html?type=edit')]\")))\n sleep(1)\n wait = WebDriverWait(self.browser, 30)\n wait.until(ec.element_to_be_clickable((By.XPATH, \"//*[@name='dbName']/preceding-sibling::input\")))\n\n self.database_page(db_name=db_name, db_driver=db_driver, db_url=db_url, username=username, pwd=pwd,\n belong_type=belong_type, data_type=data_type)\n # 提交\n self.browser.find_element(By.XPATH, \"//*[@id='submitBtn']\").click()\n alert = BeAlertBox()\n msg = alert.get_msg()\n if alert.title_contains(\"保存成功\"):\n log.info(\"{0} 修改成功\".format(db))\n else:\n log.warning(\"{0} 修改失败,失败提示: {1}\".format(db, msg))\n gbl.temp.set(\"ResultMsg\", msg)\n\n def database_page(self, db_name, db_driver, db_url, username, pwd, belong_type, data_type):\n \"\"\"\n :param db_name: 数据库名称\n :param db_driver: 数据库驱动\n :param db_url: 数据库URL\n :param username: 用户名\n :param pwd: 密码\n :param belong_type: 归属类型\n :param data_type: 数据类型\n \"\"\"\n # 数据库名称\n if db_name:\n self.browser.find_element(By.XPATH, \"//*[@name='dbName']/preceding-sibling::input\").clear()\n self.browser.find_element(By.XPATH, \"//*[@name='dbName']/preceding-sibling::input\").send_keys(db_name)\n log.info(\"设置数据库名称: {0}\".format(db_name))\n\n # 数据库驱动\n if db_driver:\n self.browser.find_element(By.XPATH, \"//*[@name='dbDriver']/preceding-sibling::input\").click()\n self.browser.find_element(By.XPATH, \"//*[contains(@id,'dbDriver') and text()='{0}']\".format(db_driver)).click()\n log.info(\"设置数据库驱动: {0}\".format(db_driver))\n\n # 数据库URL\n if db_url:\n self.browser.find_element(By.XPATH, \"//*[@name='dbUrl']/preceding-sibling::input\").clear()\n self.browser.find_element(By.XPATH, \"//*[@name='dbUrl']/preceding-sibling::input\").send_keys(db_url)\n log.info(\"设置数据库URL: {0}\".format(db_url))\n\n # 用户名\n if username:\n self.browser.find_element(By.XPATH, \"//*[@name='username']/preceding-sibling::input\").clear()\n self.browser.find_element(By.XPATH, \"//*[@name='username']/preceding-sibling::input\").send_keys(username)\n log.info(\"设置用户名: {0}\".format(username))\n\n # 密码\n if pwd:\n try:\n # 判断是否是修改密码\n self.browser.find_element(\n By.XPATH, \"//*[@id='pwd']/following-sibling::span//a[contains(@class, 'edit')]\").click()\n except NoSuchElementException:\n pass\n self.browser.find_element(By.XPATH, \"//*[@name='pwd']/preceding-sibling::input\").send_keys(pwd)\n sleep(1)\n log.info(\"设置密码: {0}\".format(pwd))\n\n # 归属类型\n if belong_type:\n self.browser.find_element(By.XPATH, \"//*[@name='belongType']/preceding-sibling::input\").click()\n belong_type = \"外部库\"\n self.browser.find_element(\n By.XPATH, \"//*[contains(@id,'belongType') and text()='{0}']\".format(belong_type)).click()\n log.info(\"设置归属类型: {0}\".format(belong_type))\n\n # 数据类型\n if data_type:\n self.browser.find_element(By.XPATH, \"//*[@name='dataTypeId']/preceding-sibling::input\").click()\n self.browser.find_element(\n By.XPATH, \"//*[contains(@id,'dataTypeId') and text()='{0}']\".format(data_type)).click()\n log.info(\"设置数据类型: {0}\".format(data_type))\n\n def test(self, db_name):\n \"\"\"\n :param db_name: 数据库名称\n \"\"\"\n log.info(\"开始测试数据\")\n self.search(query={\"数据库名称\": db_name}, need_choose=True)\n self.browser.find_element(By.XPATH, \"//*[@id='editBtn']\").click()\n\n # 鉴于数据权限问题,在修改/删除数据时,需要判断是否有弹出框提示无权操作\n alert = BeAlertBox(timeout=2, back_iframe=False)\n exist = alert.exist_alert\n if exist:\n msg = alert.get_msg()\n gbl.temp.set(\"ResultMsg\", msg)\n else:\n 
wait = WebDriverWait(self.browser, 30)\n wait.until(ec.frame_to_be_available_and_switch_to_it((\n By.XPATH, \"//iframe[contains(@src,'dbCfgEdit.html?type=edit')]\")))\n\n self.browser.find_element(By.XPATH, \"//*[@id='testBtn']//*[text()='测试']\").click()\n alert = BeAlertBox(back_iframe=True, timeout=60)\n msg = alert.get_msg()\n if alert.title_contains(\"测试成功\"):\n log.info(\"{0} 测试成功\".format(db_name))\n else:\n log.warning(\"{0} 测试失败,测试返回结果: {1}\".format(db_name, msg))\n gbl.temp.set(\"ResultMsg\", msg)\n\n def delete(self, db_name):\n \"\"\"\n :param db_name: 数据库名称\n \"\"\"\n log.info(\"开始删除数据\")\n self.search(query={\"数据库名称\": db_name}, need_choose=True)\n self.browser.find_element(By.XPATH, \"//*[@id='delBtn']\").click()\n\n alert = BeAlertBox(back_iframe=False)\n msg = alert.get_msg()\n if alert.title_contains(\"您确定需要删除{0}吗\".format(db_name), auto_click_ok=False):\n alert.click_ok()\n sleep(1)\n alert = BeAlertBox(back_iframe=False)\n msg = alert.get_msg()\n if alert.title_contains(\"操作成功\"):\n log.info(\"{0} 删除成功\".format(db_name))\n else:\n log.warning(\"{0} 删除失败,失败提示: {1}\".format(db_name, msg))\n else:\n log.warning(\"{0} 删除失败,失败提示: {1}\".format(db_name, msg))\n gbl.temp.set(\"ResultMsg\", msg)\n\n def data_clear(self, db_name, fuzzy_match=False):\n \"\"\"\n :param db_name: 数据库名称\n :param fuzzy_match: 模糊匹配\n \"\"\"\n # 用于清除数据,在测试之前执行, 使用关键字开头模糊查询\n self.search(query={\"数据库名称\": db_name}, need_choose=False)\n fuzzy_match = True if fuzzy_match == \"是\" else False\n if fuzzy_match:\n record_element = self.browser.find_elements(\n By.XPATH, \"//*[@field='dbName']//*[starts-with(text(),'{0}')]\".format(db_name))\n else:\n record_element = self.browser.find_elements(\n By.XPATH, \"//*[@field='dbName']//*[text()='{0}']\".format(db_name))\n if len(record_element) == 0:\n # 查询结果为空,结束处理\n log.info(\"查询不到满足条件的数据,无需清理\")\n return\n\n exist_data = True\n while exist_data:\n pe = record_element[0]\n search_result = pe.text\n pe.click()\n log.info(\"选择: {0}\".format(search_result))\n # 删除\n self.browser.find_element(By.XPATH, \"//*[@id='delBtn']\").click()\n alert = BeAlertBox(back_iframe=False)\n msg = alert.get_msg()\n if alert.title_contains(\"您确定需要删除{0}吗\".format(search_result), auto_click_ok=False):\n alert.click_ok()\n alert = BeAlertBox(back_iframe=False)\n msg = alert.get_msg()\n if alert.title_contains(\"成功\"):\n log.info(\"{0} 删除成功\".format(search_result))\n page_wait()\n if fuzzy_match:\n # 重新获取页面查询结果\n record_element = self.browser.find_elements(\n By.XPATH, \"//*[@field='dbName']//*[text()='{0}']\".format(db_name))\n if len(record_element) == 0:\n # 查询结果为空,修改exist_data为False,退出循环\n log.info(\"数据清理完成\")\n exist_data = False\n else:\n break\n else:\n raise Exception(\"删除数据时出现未知异常: {0}\".format(msg))\n else:\n # 无权操作\n log.warning(\"{0} 清理失败,失败提示: {1}\".format(db_name, msg))\n gbl.temp.set(\"ResultMsg\", msg)\n break\n","repo_name":"pengw181/webappunit","sub_path":"src/main/python/core/app/VisualModeler/commonInfo/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":14846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"26011729561","text":"from models.USERS import USERS\nfrom cryptography.fernet import Fernet\nimport pandas as pd\nimport numpy as np\nfrom app import dbb\n\ndef user_verification(p_username,p_password):\n print (p_username)\n user = USERS.query.filter_by(username=p_username).first()\n if user:\n print(\"Success\")\n if decode(user.password) == p_password:\n print(user.role)\n return 
user\n else:\n print(\"Invalid Password\")\n return False\n else:\n print(\"Wrong Username\")\n return False\n\ndef encode(password):\n cipher_suite = Fernet(b'nIoDjdsM388MiEbvlAo3LVpbPeLME5uDEovGH_kXKFg=')\n cipher_text = cipher_suite.encrypt((password).encode('UTF-8'))\n cipher_text_final = cipher_suite.encrypt((cipher_text.decode('UTF-8')).encode('UTF-8'))\n cipher_text_final=cipher_text_final.decode('UTF-8')\n # print(cipher_text_final)\n return cipher_text_final\n\n\ndef decode(password):\n cipher_suite = Fernet(b'nIoDjdsM388MiEbvlAo3LVpbPeLME5uDEovGH_kXKFg=')\n password=password.encode('UTF-8')\n plain_text = cipher_suite.decrypt(password).decode('UTF-8')\n plain_text_final = cipher_suite.decrypt(plain_text.encode('UTF-8')).decode('UTF-8')\n return plain_text_final\n\n\n#------------------------------------------------------------------------------------------------------------\n\n\ndef get_project_list(username):\n from models.PROJECTS import PROJECTS\n projects=PROJECTS.query.filter_by(username=username).all()\n for p in projects:\n p.set()\n return projects\n\ndef get_project(project_id):\n from models.PROJECTS import PROJECTS\n proj=PROJECTS.query.filter_by(project_id=project_id).first()\n proj.set()\n return proj\n\n\ndef get_del_list(project_id):\n from models.DELIVERABLES import DELIVERABLES\n deliverables=DELIVERABLES.query.filter_by(project_id=project_id).all()\n for d in deliverables:\n d.set()\n return deliverables\n\ndef get_del(del_id):\n from models.DELIVERABLES import DELIVERABLES\n deliverable=DELIVERABLES.query.filter_by(del_id=del_id).first()\n deliverable.set()\n return deliverable\n\ndef get_activities(del_id):\n from models.ACTIVITIES import ACTIVITIES\n activities=ACTIVITIES.query.filter_by(del_id=del_id).all()\n return activities\n\n\ndef predict(proj_details):\n data = pd.read_csv(r\"C:\\Users\\Momin\\Desktop\\PROJECTS.xlsx\") # source file\n data.drop(data.columns[[0, 1, 2, 4, 5, 12, 14]], axis=1, inplace=True)\n data['phase'] = data['phase'].map({'Closure': 0, 'Initiation': 1, 'Planning': 2, 'Execution': 3})\n data['project_type'] = data['project_type'].map(\n {'Public Utilities': 0, 'Health Care': 1, 'Capital Goods': 2, 'Finance': 3, 'Consumer Services': 4,\n 'Transportation': 5, 'Technology': 6, 'Miscellaneous': 7, 'Basic Industries': 8, 'Consumer Non-Durables': 9,\n 'Consumer Durables': 10, 'Energy': 11, 'Other': 12, 'Development': 13})\n data['status'] = data['status'].map({'Completed': 0, 'In Process': 1})\n X = data.iloc[:, 0:7].values # specify labels 0-7 prediction\n y = data.iloc[:, 7].values # 8 to predict\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=0)\n from sklearn.ensemble import RandomForestRegressor\n\n regressor = RandomForestRegressor(n_estimators=250, random_state=0)\n regressor.fit(X_train, y_train)\n\n type_list={'Public Utilities': 0, 'Health Care': 1, 'Capital Goods': 2, 'Finance': 3, 'Consumer Services': 4,\n 'Transportation': 5, 'Technology': 6, 'Miscellaneous': 7, 'Basic Industries': 8, 'Consumer Non-Durables': 9,\n 'Consumer Durables': 10, 'Energy': 11, 'Other': 12, 'Development': 13}\n\n project_type = type_list[proj_details[2]]\n days_alloted = int(proj_details[3])\n status = 0\n phase = 0\n priority = int(proj_details[4])\n portfolio_id = 6\n team_id = int(proj_details[5])\n arr = [project_type, days_alloted, status, phase, priority, portfolio_id, team_id]\n\n test = np.asarray(arr)\n test = test.reshape(1, -1)\n y_pred = 
regressor.predict(test)\n print(y_pred)\n return int(y_pred);\n\n\ndef cust_pred():\n import pandas as pd\n import numpy as np\n import random\n import sklearn.linear_model\n data = pd.read_csv(r\"C:\\Users\\Momin\\Desktop\\new_Customer.csv\") # source file\n data.drop(data.columns[[0, 4]], axis=1, inplace=True)\n data2 = data.groupby(['year', 'district'], as_index=False).count()\n data2.drop(['customer_name', 'country'], axis=1, inplace=True)\n ndistricts = np.unique(data2.district)\n data2 = pd.get_dummies(data=data2, columns=['district'])\n narray = np.asarray(data2)\n model = sklearn.linear_model.LinearRegression()\n X = narray[:, 2:len(ndistricts) + 2]\n y = narray[:, 1]\n model.fit(X, y)\n test = [1, 0, 0, 0, 0, 0,0] # balochistan[1,0,0,0,0,0,0]#ajk[0,0,1,0,0,0,0]#fata[0,0,0,1,0,0,0]#islamabad[0,0,0,0,1,0,0]#kpk[0,0,0,0,0,1,0]#punjab[0,0,0,0,0,0,1]#sindh\n test = np.asarray(test)\n test1 = test.reshape(1, -1)\n test1=model.predict(X=test1)\n # print(model.predict(X=test1))\n test = [0, 1, 0, 0, 0, 0,0] # balochistan[1,0,0,0,0,0,0]#ajk[0,0,1,0,0,0,0]#fata[0,0,0,1,0,0,0]#islamabad[0,0,0,0,1,0,0]#kpk[0,0,0,0,0,1,0]#punjab[0,0,0,0,0,0,1]#sindh\n test = np.asarray(test)\n test2 = test.reshape(1, -1)\n test2=model.predict(X=test2)\n # print(model.predict(X=test2))\n test = [0, 0, 1, 0, 0, 0,0] # balochistan[1,0,0,0,0,0,0]#ajk[0,0,1,0,0,0,0]#fata[0,0,0,1,0,0,0]#islamabad[0,0,0,0,1,0,0]#kpk[0,0,0,0,0,1,0]#punjab[0,0,0,0,0,0,1]#sindh\n test = np.asarray(test)\n test3 = test.reshape(1, -1)\n test3=model.predict(X=test3)\n # print(model.predict(X=test3))\n test = [0, 0, 0, 1, 0, 0,0] # balochistan[1,0,0,0,0,0,0]#ajk[0,0,1,0,0,0,0]#fata[0,0,0,1,0,0,0]#islamabad[0,0,0,0,1,0,0]#kpk[0,0,0,0,0,1,0]#punjab[0,0,0,0,0,0,1]#sindh\n test = np.asarray(test)\n test4 = test.reshape(1, -1)\n test4=model.predict(X=test4)\n # print(model.predict(X=test4))\n test = [0, 0, 0, 0, 1, 0,0] # balochistan[1,0,0,0,0,0,0]#ajk[0,0,1,0,0,0,0]#fata[0,0,0,1,0,0,0]#islamabad[0,0,0,0,1,0,0]#kpk[0,0,0,0,0,1,0]#punjab[0,0,0,0,0,0,1]#sindh\n test = np.asarray(test)\n test5 = test.reshape(1, -1)\n test5=model.predict(X=test5)\n # print(model.predict(X=test5))\n test = [0, 0, 0, 0, 0, 1,0] # balochistan[1,0,0,0,0,0,0]#ajk[0,0,1,0,0,0,0]#fata[0,0,0,1,0,0,0]#islamabad[0,0,0,0,1,0,0]#kpk[0,0,0,0,0,1,0]#punjab[0,0,0,0,0,0,1]#sindh\n test = np.asarray(test)\n test6 = test.reshape(1, -1)\n test6=model.predict(X=test6)\n # print(model.predict(X=test6))\n test = [0, 0, 0, 0, 0, 0,1] # balochistan[1,0,0,0,0,0,0]#ajk[0,0,1,0,0,0,0]#fata[0,0,0,1,0,0,0]#islamabad[0,0,0,0,1,0,0]#kpk[0,0,0,0,0,1,0]#punjab[0,0,0,0,0,0,1]#sindh\n test = np.asarray(test)\n test7 = test.reshape(1, -1)\n test7=model.predict(X=test7)\n print(test7)\n\n cust_orders={'Balochistan':float(test1),'SINDH':float(test2),'AJK':float(test3),'FATA':float(test4),'ISLAMABAD':float(test5),'KPK':float(test6),'PUNJAB':float(test7)}\n print(cust_orders)\n l=[int(test1),int(test2),int(test3),int(test4),int(test5),int(test6),int(test7)]\n return l\n # return cust_orders\n\ndef get_allocated_fund(project_id):\n from models.FINANCES import FINANCES\n allocated_fund=FINANCES.query.with_entities(FINANCES.allocation).filter_by(project_id=project_id).first()\n if(allocated_fund):\n return allocated_fund[0]\n else:\n return 0\n\ndef get_POs(project_id):\n from models.PO import PO\n PO_list=PO.query.with_entities(PO.r_id,PO.quantity,PO.status).filter_by(project_id=project_id).all()\n return 
PO_list\n\n#-------------------------------------------------------------------------------------------------------------------------\n\ndef get_members(username):\n from models.TEAM_MEMBERS import TEAM_MEMBERS;from models.TEAMS import TEAMS\n team = TEAMS.query.filter_by(team_leader=username).first()\n members=TEAM_MEMBERS.query.filter_by(team_id=team.team_id).all()\n return members\n\n\ndef get_leader_project_list(username):\n from models.TEAMS import TEAMS\n team = TEAMS.query.filter_by(team_leader=username).first()\n from models.PROJECTS import PROJECTS\n projects=PROJECTS.query.filter_by(team_id=team.team_id).all()\n for p in projects:\n p.set()\n return projects\n\n#------------------------------------------------------------------------------------------\n\ndef get_del_edit_details(proj_id,del_id):\n return dbb.execute('SELECT del_name,del_desc,priority FROM \\\"DELIVERABLES\\\" where del_id=' + str(del_id) + 'and project_id=' + str(proj_id))\n\ndef get_project_edit_details(proj_id):\n return dbb.execute('SELECT project_name,project_desc,priority FROM \\\"PROJECTS\\\" where project_id=' + str(proj_id))\n\n#portfolios\ndef get_portfolios():\n return dbb.execute('select pf.portfolio_name,p.project_name,SUM(p.quantity) as qty from \\\"PROJECTS\\\" as p,\\\"PORTFOLIO\\\" as pf where p.portfolio_id = pf.portfolio_id AND (CAST (end_date as text) BETWEEN \\'2017-10%%\\' AND \\'2018-01%%\\') GROUP BY pf.portfolio_name,p.project_name ORDER BY pf.portfolio_name')\n\n#quantity of projs\ndef mps(start_date,end_date):\n return dbb.execute('SELECT pf.portfolio_name, p.project_name, p.start_date, p.end_date, p.quantity FROM \\\"PROJECTS\\\" as p,\\\"PORTFOLIO\\\" as pf where (CAST(end_date as text) BETWEEN \\'2017-10%%\\' AND \\'2018-01%%\\') AND (p.portfolio_id = pf.portfolio_id) ORDER BY pf.portfolio_name')\n","repo_name":"Osaama01/fyptest","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":9502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"26118866873","text":"import bpy\n\n\n# selects all elements from specific channel\ndef select_all_from_channel(active_channel: int):\n context = bpy.context\n scene = context.scene\n\n\n sed = scene.sequence_editor\n # if active strip isn't in active_channel set to None.\n if getattr(sed.active_strip, \"channel\", -1) != active_channel:\n sed.active_strip = None\n\n sequences = sed.sequences_all\n # select all strips in active channel\n for strip in sequences:\n strip.select = strip.channel == active_channel\n \nselect_all_from_channel(2)","repo_name":"3dprogramin/blender-scripts","sub_path":"misc/select-all-from-channel.py","file_name":"select-all-from-channel.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"28851064828","text":"#!/usr/local/bin/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: \n@file: 面试题 08.11.硬币.py\n@time: 2020/4/23 09:23\n@desc: \n\"\"\"\n\"\"\"\n硬币。给定数量不限的硬币,币值为25分、10分、5分和1分,编写代码计算n分有几种表示法。(结果可能会很大,你需要将结果模上1000000007)\n示例1:\n输入: n = 5\n输出:2\n解释: 有两种方式可以凑成总金额:\n5=5\n5=1+1+1+1+1\n示例2:\n\n输入: n = 10\n输出:4\n解释: 有四种方式可以凑成总金额:\n10=10\n10=5+5\n10=5+1+1+1+1+1\n10=1+1+1+1+1+1+1+1+1+1\n说明:\n注意:\n你可以假设:\n0 <= n (总金额) <= 1000000\n\"\"\"\n\n\nclass Solution(object):\n def waysToChange(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n # 这个问题是两个非常经典的问题的组合,其一是“完全背包问题”,其二是“背包方案数问题”\n # dp[i][j] 使用前i种硬币计算j分的表示法种数 令coins=[25, 10, 5, 1]\n # 
状态转移方程的推导:\n # dp[i][j] = dp[i-1][j] + dp[i-1][j-coins[i]] + dp[i-1][j-2coins[i]] + ... dp[i-1][j-kcoins[i]]\n # dp[i][j-coins[i]] = dp[i-1][j-coins[i]] + dp[i-1][j-2coins[i]] + ... dp[i-1][j-kcoins[i]]\n # dp[i][j] - dp[i][j-coins[i]] = dp[i-1][j]\n # dp[i][j] = dp[i][j-coins[i]] + dp[i-1][j]\n coins = [1,5,10,25]\n dp = [[0 for _ in range(n+1)] for _ in range(len(coins)+1)]\n for i in range(len(coins)+1):\n dp[i][0] = 1\n\n for i in range(1, len(coins)+1):\n for j in range(1, n+1):\n if j < coins[i-1]:\n dp[i][j] = dp[i-1][j]\n else:\n dp[i][j] = dp[i][j - coins[i-1]] + dp[i - 1][j]\n return dp[-1][-1] % 1000000007\n\n\na = Solution().waysToChange(5)\nprint(a)","repo_name":"starrye/LeetCode","sub_path":"all_topic/esay_topic/面试题 08.11.硬币.py","file_name":"面试题 08.11.硬币.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"21697862555","text":"# -*- coding: utf-8 -*-\n# UTF-8 encoding when using korean\nfrom collections import deque\n\ndx = [1,-1,0,0] #오른쪽, 왼쪽, 아래, 위\ndy = [0,0,1,-1]\n\ndef dfs(x,y):\n queue = deque()\n queue.append((x,y))\n \n while queue:\n x, y = queue.popleft()\n \n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n \n if nx < 0 or ny < 0 or nx >= len(map_[0]) or ny >= len(map_):\n continue\n \n if map_[ny][nx] == 0:\n continue\n \n if map_[ny][nx] == 1:\n map_[ny][nx] = map_[y][x] + 1\n queue.append((nx,ny))\n \n return map_[destination[0]][destination[1]]-1\n\nrow,col = map(int,input().split())\nmap_ = [[]for _ in range(row)]\n\nstart = [0,0]\ndestination = [row-1,col-1]\nanswer = []\n\nfor r in range(row):\n for c,char in enumerate(input()):\n if char == \"S\":\n start = [r,c]\n \n if char == \"E\":\n destination = [r,c]\n \n if char == \"#\":\n map_[r].append(0)\n else:\n map_[r].append(1)\n\nprint(dfs(start[0],start[1]))\n","repo_name":"oguuk/10weeks-codingtest-","sub_path":"8 그래프 탐색(DFS:BFS)/08F 미로 탈출하기.py","file_name":"08F 미로 탈출하기.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5091265998","text":"from folium import Map, Popup, Icon, Marker, raster_layers, LayerControl\nfrom folium.plugins import MarkerCluster, Fullscreen\nimport json\nimport numpy as np\n\n\n# Make the Map with Folium\ndef make_map(lakes):\n if len(lakes) > 1:\n # Create NumPy arrays for latitude and longitude values.\n latitudes = np.array([lake[\"latitude\"] for lake in lakes if lake[\"latitude\"] != 0.0])\n longitudes = np.array([lake[\"longitude\"] for lake in lakes if lake[\"longitude\"] != 0.0])\n\n # Calculate the average latitude and longitude values using NumPy's `mean` function.\n location = [np.mean(latitudes), np.mean(longitudes)]\n\n # Create the map with the calculated location.\n folium_map = Map(\n width=\"100%\",\n height=\"100%\",\n max_width=\"100%\",\n min_height=\"100%\",\n location=location,\n allowFullScreen=\"True\",\n zoom_start=7,\n tiles=\"Stamen Terrain\"\n )\n\n # Add marker cluster and fullscreen plugin to the map.\n marker_cluster = MarkerCluster().add_to(folium_map)\n Fullscreen(\n position='topright',\n title='Expand me',\n title_cancel='Exit me',\n force_separate_button=False\n ).add_to(folium_map)\n\n # Iterate through the lakes and add markers to the map.\n for lake in lakes:\n if lake[\"latitude\"] != 0.0:\n html = f'''\n <h5 >{lake[\"lake\"]}</h5>\n <p style=\"color:red\">Date Stocked: {lake[\"date\"]}</p>\n <p style=\"color:green\">Stocked Amount: 
{lake[\"stocked_fish\"]}</p>\n <p style=\"color:cyan\">Species: {lake[\"species\"]}</p>\n <p style=\"color:orange\">Hatchery: {lake[\"hatchery\"]}</p>\n <a style=\"color:blue\" href=\"{lake[\"directions\"]}\" target=\"_blank\">Directions via Googlemaps </a>\n '''\n\n popup = Popup(html, max_width=400, lazy=True)\n location = (lake[\"latitude\"], lake[\"longitude\"])\n\n if lake[\"derby_participant\"]:\n marker = Marker(location=location, tooltip=lake[\"lake\"], popup=popup,\n icon=Icon(color='red', icon='trophy', prefix='fa'))\n else:\n marker = Marker(location=location, tooltip=lake[\"lake\"], popup=popup,\n icon=Icon(color='blue', icon='info', prefix='fa'))\n\n # Add the marker to the map.\n marker.add_to(marker_cluster)\n\n # Add OpenStreetMap layer and LayerControl to the map.\n raster_layers.TileLayer('OpenStreetMap').add_to(folium_map)\n LayerControl().add_to(folium_map)\n\n return folium_map._repr_html_()\n else:\n # return a blank map if there is no data\n return Map(location=[47.7511, -120.7401], zoom_start=7)._repr_html_()\n\n\ndef show_total_stocked_by_date_chart(lakes):\n if len(lakes) > 0:\n date_format = '%Y-%m-%d'\n dates, total_stocked_fish = zip(*lakes)\n date_strings = [date.strftime(date_format) for date in dates]\n chart_data = {\n 'labels': date_strings,\n 'datasets': [\n {\n 'label': 'Total Stocked Trout by Date',\n 'data': total_stocked_fish,\n 'backgroundColor': '#9fd3c7',\n 'borderColor': '#9fd3c7',\n 'color': '#9fd3c7',\n 'borderWidth': 1,\n 'pointRadius': 2\n }\n ]\n }\n chart_options = {\n 'scales': {\n 'y': {\n 'ticks': {'color': '#ececec', 'beginAtZero': True}\n },\n 'x': {\n 'ticks': {'color': '#ececec'}\n }\n }\n }\n chart_data_json = json.dumps(chart_data)\n chart_options_json = json.dumps(chart_options)\n graph_html = f'<canvas id=\"total-stocked-by-date-chart\"></canvas>\\n<script>\\nvar ctx = document.getElementById(\"total-stocked-by-date-chart\").getContext(\"2d\");\\nvar myChart = new Chart(ctx, {{ type: \"line\", data: {chart_data_json}, options: {chart_options_json} }});\\n</script>'\n return graph_html\n else:\n return f'<canvas id=\"total-stocked-by-date-chart\">NO DATA</canvas>'\n\n\ndef show_total_stocked_by_hatchery_chart(lakes):\n if len(lakes) > 0:\n hatcheries, total_stocked_fish = zip(*lakes)\n chart_data = {\n 'labels': hatcheries,\n 'datasets': [\n {\n 'label': 'Total Stocked Trout by Hatchery',\n 'data': total_stocked_fish,\n 'color': '#ececec',\n 'borderColor': '#9fd3c7',\n 'backgroundColor': '#9fd3c7',\n 'borderWidth': 1,\n 'pointRadius': 3\n }\n ]\n }\n chart_options = {\n 'scales': {\n 'y': {\n 'ticks': {'color': '#ececec', 'beginAtZero': True}\n },\n 'x': {\n 'ticks': {'color': '#ececec'}\n }\n }\n }\n chart_data_json = json.dumps(chart_data)\n chart_options_json = json.dumps(chart_options)\n graph_html = f'<canvas id=\"total-stocked-by-hatchery-chart\"></canvas>\\n<script>\\nvar ctx = document.getElementById(\"total-stocked-by-hatchery-chart\").getContext(\"2d\");\\nvar myChart = new Chart(ctx, {{ type: \"bar\", data: {chart_data_json}, options: {chart_options_json} }});\\n</script>'\n return graph_html\n else:\n return f'<canvas id=\"total-stocked-by-hatchery-chart\"></canvas>'\n","repo_name":"Thomas-Basham/washington-trout-stats","sub_path":"front_end/map_and_charts.py","file_name":"map_and_charts.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42145841070","text":"# more exercices from szkopul.edu.pl\n# also 
mateuszrus.pl/matura-rozszerzona-z-informatyki/\n# count a number of letters in random string\n\ndef lettercount(rstring):\n    return {i:rstring.count(i) for i in rstring} # returns dictionary from string / count number of each character\n\n# \"Divide and conquer\" exercise for random number of \"1\" and \"0\":\n\n#!/usr/bin/env python\n\nimport random as rr # to create random number like: \"111111110000\"\n\ndef st(n=10): # size of random number (default 10)\n    r1 = int(rr.random()*n)\n    print(r1)\n    stt = \"1\"*r1 + \"0\"*(n-r1)\n    print(stt) # shows random number like: \"111111110000000\"\n    # nn is starting point from where we search left and right for \"1\" or \"0\"\n    nn = 0\n    # nnn is a step of how far we go for next search, half of what left\n    nnn = int(n/2)\n    while abs(nnn) > 0:\n        if stt[nn + nnn] == \"1\":\n            print((nn + nnn), \" - this is 1\")\n            nn += nnn\n            nnn = int(abs(nnn)/2)\n        else:\n            print((nn + nnn), \" - this is 0\")\n            nn += nnn\n            nnn = int(abs(nnn)/2)*(-1)\n    nn+=int(stt[nn])\n    return nn,\"-\",stt[nn]\n\n\n\n\n# 3-level XML file parser using ElementTree:\n\ndef xmlparse(xmlfile):\n    import xml.etree.ElementTree as et # ElementTree required library\n    \n    rr = et.parse(xmlfile).getroot()\n    \n    for i in rr:\n        print(i.tag + \" - \" + i.text)\n        if len(i): \n            for j in i:\n                print(\"-\" + j.tag + \" - \" + j.text)\n                if len(j):\n                    for k in j:\n                        print(\"--\" + k.tag + \" - \" + k.text)\n    \n# Snippet to change individual XML element value\n# Still need to find a way to check if element exist\n# @inprogress\n\ndef newgg(file1, file2, element1, value1):\n    import xml.etree.ElementTree as et\n    \n    try: # in case missing file1\n        source_tree = et.parse(file1)\n    except:\n        return \"NO SUCH FILE: \" + str(file1)\n    source_root = source_tree.getroot()\n    \n    try: # in case missing element1 in file1\n        print(\"Old value: \" + source_root.find(element1).text)\n    except:\n        return \"NO SUCH ELEMENT: \" + str(element1) + \" IN FILE \" + str(file1)\n    \n    source_root.find(element1).text = value1\n    print(\"New value: \" + str(value1))\n    print(\"Saved to new file: \" + str(file2))\n    source_tree.write(file2)\n    \n# end\n    \n    \n\n\n","repo_name":"codeacade/py-random","sub_path":"szkopul.py","file_name":"szkopul.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"37685334172","text":"__author__ = \"Ian Haywood\"\n__license__ = \"GPL v2 or later (details at https://www.gnu.org)\"\n\n# stdlib\nimport sys\nimport os\nimport re\nimport logging\n\n\n_log = logging.getLogger('gm.bootstrapper')\n\nunformattable_error_id = 12345\n\n#===================================================================\nclass Psql:\n\n\tdef __init__ (self, conn):\n\t\t\"\"\"\n\t\tdb : the interpreter to connect to, must be a DBAPI compliant interface\n\t\t\"\"\"\n\t\tself.conn = conn\n\t\tself.vars = {'ON_ERROR_STOP': None}\n\n\t#---------------------------------------------------------------\n\tdef match(self, pattern):\n\t\tmatch = re.match(pattern, self.line)\n\t\tif match is None:\n\t\t\treturn 0\n\n\t\tself.groups = match.groups()\n\t\treturn 1\n\n\t#---------------------------------------------------------------\n\tdef fmt_msg(self, aMsg):\n\t\ttry:\n\t\t\ttmp = \"%s:%d: %s\" % (self.filename, self.lineno-1, aMsg)\n\t\t\ttmp = tmp.replace('\\r', '')\n\t\t\ttmp = tmp.replace('\\n', '')\n\t\texcept UnicodeDecodeError:\n\t\t\tglobal unformattable_error_id\n\t\t\ttmp = \"%s:%d: <cannot str(msg), printing on console with ID [#%d]>\" % (self.filename, 
self.lineno-1, unformattable_error_id)\n\t\t\ttry:\n\t\t\t\tprint('ERROR: GNUmed bootstrap #%d:' % unformattable_error_id)\n\t\t\t\tprint(aMsg)\n\t\t\texcept Exception: pass\n\t\t\tunformattable_error_id += 1\n\t\treturn tmp\n\n\t#---------------------------------------------------------------\n\tdef run (self, filename):\n\t\t\"\"\"\n\t\tfilename: a file, containing semicolon-separated SQL commands\n\t\t\"\"\"\n\t\t_log.debug('processing [%s]', filename)\n\t\tcurs = self.conn.cursor()\n\t\tcurs.execute('show session authorization')\n\t\tstart_auth = curs.fetchall()[0][0]\n\t\tcurs.close()\n\t\t_log.debug('session auth: %s', start_auth)\n\n\t\tif os.access (filename, os.R_OK):\n\t\t\tsql_file = open(filename, mode = 'rt', encoding = 'utf-8-sig')\n\t\telse:\n\t\t\t_log.error(\"cannot open file [%s]\", filename)\n\t\t\treturn 1\n\n\t\tself.lineno = 0\n\t\tself.filename = filename\n\t\tin_string = False\n\t\tbracketlevel = 0\n\t\tcurr_cmd = ''\n\t\tcurs = self.conn.cursor()\n\n\t\tfor self.line in sql_file:\n\t\t\tself.lineno += 1\n\t\t\tif len(self.line.strip()) == 0:\n\t\t\t\tcontinue\n\n\t\t\t# \\set\n\t\t\tif self.match(r\"^\\\\set (\\S+) (\\S+)\"):\n\t\t\t\t_log.debug('\"\\set\" found: %s', self.groups)\n\t\t\t\tself.vars[self.groups[0]] = self.groups[1]\n\t\t\t\tif self.groups[0] == 'ON_ERROR_STOP':\n\t\t\t\t\t# adjusting from string to int so that \"1\" -> 1 -> True\n\t\t\t\t\tself.vars['ON_ERROR_STOP'] = int(self.vars['ON_ERROR_STOP'])\n\t\t\t\tcontinue\n\n\t\t\t# \\unset\n\t\t\tif self.match (r\"^\\\\unset (\\S+)\"):\n\t\t\t\tself.vars[self.groups[0]] = None\n\t\t\t\tcontinue\n\n\t\t\t# other '\\' commands\n\t\t\tif self.match (r\"^\\\\(.*)\") and not in_string:\n\t\t\t\t# most other \\ commands are for controlling output formats, don't make\n\t\t\t\t# much sense in an installation script, so we gently ignore them\n\t\t\t\t_log.warning(self.fmt_msg(\"psql command \\\"\\\\%s\\\" being ignored \" % self.groups[0]))\n\t\t\t\tcontinue\n\n\t\t\t# non-'\\' commands\n\t\t\tthis_char = self.line[0]\n\t\t\t# loop over characters in line\n\t\t\tfor next_char in self.line[1:] + ' ':\n\n\t\t\t\t# start/end of string detected\n\t\t\t\tif this_char == \"'\":\n\t\t\t\t\tin_string = not in_string\n\n\t\t\t\t# detect \"--\"-style comments\n\t\t\t\tif this_char == '-' and next_char == '-' and not in_string:\n\t\t\t\t\tbreak\n\n\t\t\t\t# detect bracketing\n\t\t\t\tif this_char == '(' and not in_string:\n\t\t\t\t\tbracketlevel += 1\n\t\t\t\tif this_char == ')' and not in_string:\n\t\t\t\t\tbracketlevel -= 1\n\n\t\t\t\t# have we:\n\t\t\t\t# - found end of command ?\n\t\t\t\t# - are not inside a string ?\n\t\t\t\t# - are not inside bracket pair ?\n\t\t\t\tif not ((in_string is False) and (bracketlevel == 0) and (this_char == ';')):\n\t\t\t\t\tcurr_cmd += this_char\n\t\t\t\telse:\n\t\t\t\t\tif curr_cmd.strip() != '':\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tcurs.execute(curr_cmd)\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tdata = curs.fetchall()\n\t\t\t\t\t\t\t\t_log.debug('cursor data: %s', data)\n\t\t\t\t\t\t\texcept Exception:\t# actually: psycopg2.ProgrammingError but no handle\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\texcept Exception as error:\n\t\t\t\t\t\t\t_log.exception(curr_cmd)\n\t\t\t\t\t\t\tif re.match(r\"^NOTICE:.*\", str(error)):\n\t\t\t\t\t\t\t\t_log.warning(self.fmt_msg(error))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t_log.error(self.fmt_msg(error))\n\t\t\t\t\t\t\t\tif hasattr(error, 'diag'):\n\t\t\t\t\t\t\t\t\tfor prop in dir(error.diag):\t\t\t# pylint: disable=no-member\n\t\t\t\t\t\t\t\t\t\tif 
prop.startswith('__'):\n\t\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\t\tval = getattr(error.diag, prop)\t\t# pylint: disable=no-member\n\t\t\t\t\t\t\t\t\t\tif val is None:\n\t\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\t\t_log.error('PG diags %s: %s', prop, val)\n\t\t\t\t\t\t\t\tif self.vars['ON_ERROR_STOP']:\n\t\t\t\t\t\t\t\t\tself.conn.commit()\n\t\t\t\t\t\t\t\t\tcurs.close()\n\t\t\t\t\t\t\t\t\treturn 1\n\n\t\t\t\t\tself.conn.commit()\n\t\t\t\t\tcurs.close()\n\t\t\t\t\tcurs = self.conn.cursor()\n\t\t\t\t\tcurr_cmd = ''\n\n\t\t\t\tthis_char = next_char\n\t\t\t# end of loop over chars\n\n\t\t# end of loop over lines\n\t\tself.conn.commit()\n\t\tcurs.execute('show session authorization')\n\t\tend_auth = curs.fetchall()[0][0]\n\t\tcurs.close()\n\t\t_log.debug('session auth after sql file processing: %s', end_auth)\n\t\tif start_auth != end_auth:\n\t\t\t_log.error('session auth changed before/after processing sql file')\n\n\t\treturn 0\n\n#===================================================================\n# testing code\nif __name__ == '__main__':\n\n\tif len(sys.argv) < 2:\n\t\tsys.exit()\n\n\tif sys.argv[1] != 'test':\n\t\tsys.exit()\n\n\t#conn = PgSQL.connect(user='gm-dbo', database = 'gnumed')\n\t#psql = Psql(conn)\n\t#psql.run(sys.argv[1])\n\t#conn.close()\n","repo_name":"ncqgm/gnumed","sub_path":"gnumed/gnumed/client/pycommon/gmPsql.py","file_name":"gmPsql.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"94"} +{"seq_id":"6356120193","text":"\"\"\"\n台湾大公园\n描述\n\n由于五寒肺炎的关系,台湾大公园决定对游客进行管制。 管制办法以及流程如下:\n管理处会先订出数字 M 以及数字 N,数字 M 代表台湾大公园目前开放的区域数量,而数字 N 则是代表管理处开放让一个游客可以游览的区域数量。\n接着会公布这 M 个区域目前已经入场的游客数量,再让游客提出想要游览的区域愿望清单。\n在游客提出的愿望清单当中,会由管理处分配目前入场人数较少的 N 个区域给该游客游览,排列方式由目前来客数少的区域排列到多的区域。\n请写一个程式帮助管理处分配游客游览区域。\n\n\n输入\n输入包含 M + 3 行:\n第一行包含一个整数 M,代表所开放的区域数量。\n第二至 M + 1 行则各包含一个字母以及一个整数,以空白分隔,代表这 M 个区域目前所在的人数。\n第 M + 2 行包含数个字母,代表该游客想要游览的愿望清单。\n第 M + 3 行则是包含一个整数 N,代表可以开放给该游客游览的区域数量。\n\n\n输出\n输出为一行,包含数个字母,为管理处提供给该游客的游览区域顺序\n\n\n输入范例 1 \n3\nA 1\nB 2\nC 3\nB C\n1\n\n输出范例 1\nB\n\n输入范例 2 \n6\nY 20\nF 50\nM 49\nX 13\nS 28\nO 96\nS Y X\n3\n\n输出范例 2\nX Y S\n\n来源\nccClub Judge\n\"\"\"\ndef people_sort(s):\n return int(s[1])\n\nM = int(input())\ncomplete_list = [input().split() for i in range(M)]\nwish_list = input().split()\nN = int(input())\n\ncomplete_list.sort(key=people_sort)\nfiltered_list = [item for item in complete_list if item[0] in wish_list]\n\nfor i in range(N):\n if i == N - 1: \n print(filtered_list[i][0])\n else:\n print(filtered_list[i][0], end=' ')","repo_name":"Fionn88/ccClub-Practice","sub_path":"judge/201204.py","file_name":"201204.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"294557527","text":"\"\"\" Evaluate BLEU for FactorizedNet or LoraNet model\n\nExample:\npython eval_lorank.py evaltools.experiment=runs/e2e_nlg/e64_l1e-05_b32_f1.0_nsgd_m0.9_w0.01_nanone_nu100_namdistillgpt2_namelora_r0.1_leepoch_srandom_t0\n\n\"\"\"\nimport os\nimport os.path as osp\nimport logging\nimport csv\nimport argparse\nfrom tqdm import tqdm\nimport warnings\nfrom itertools import groupby\n\nimport torch\nimport torch.nn as nn\nfrom datasets import load_dataset\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\nfrom transformers.generation import GenerationConfig\n\nfrom utils.pymteval import BLEUScore, NISTScore\nimport peftnet as pn\nfrom utils.utils import load_object, 
get_ignore_list_e2e\n\n\ndef get_data(dataset_name, dataset_cache):\n # Load dataset\n assert dataset_name in [\"eli5\", \"e2e_nlg\"], \"Dataset not supported\"\n\n test_split = {\"eli5\": \"validation_asks\", \"e2e_nlg\": \"test\"}[dataset_name]\n\n test_dataset = load_dataset(\n dataset_name, split=test_split, cache_dir=dataset_cache\n ).flatten()\n return test_dataset\n\n\ndef evaluate_model(cmodel, test_dataset, tokenizer, device=None, batch_size=8,\n output_path_preds=None, output_path_refs=None, compute_bleu=True):\n cmodel.eval()\n # Overwrite output files\n if output_path_preds is not None and os.path.exists(output_path_preds):\n os.remove(output_path_preds)\n\n if output_path_refs is not None and os.path.exists(output_path_refs):\n os.remove(output_path_refs)\n\n # Used to ensure that the number of predictions and references are equal\n num_mrs = 0\n num_preds = 0\n\n with torch.no_grad():\n logging.info(\"=> Testing model bleu scores (Device={}) ...\".format(device))\n BLEU = BLEUScore()\n NIST = NISTScore()\n\n # Initialize model\n cmodel.to(device)\n model_fn = cmodel.module if isinstance(cmodel, nn.DataParallel) else cmodel\n if any([isinstance(model_fn, k) for k in [pn.RosaNet, pn.LoraNet, pn.IA3Net]]):\n model_fn = model_fn.peft_model\n else:\n model_fn = model_fn\n\n model_fn.eval()\n gen_cfg = GenerationConfig(\n no_repeat_ngram_size=4,\n num_beams=5,\n max_length=512,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n )\n\n # Sort dataset by 'meaning_representation' to ensure all similar items are together\n sorted_dataset = sorted(test_dataset, key=lambda x: x['meaning_representation'])\n\n # Group the sorted dataset by `meaning_representation`\n grouped_data = [list(group) for key, group in\n groupby(sorted_dataset, key=lambda x: x['meaning_representation'])]\n\n # Combine all references for each group\n grouped_data = [\n {\n \"meaning_representation\": group[0]['meaning_representation'],\n \"human_reference\": [item['human_reference'] for item in group]\n }\n for group in grouped_data\n ]\n\n num_pts = len(grouped_data)\n\n # Start of block where warnings are suppressed\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n for i in tqdm(range(0, num_pts, batch_size)):\n batch = grouped_data[i:i + batch_size]\n\n input_strs = [\"Input: {} Output: \".format(dp['meaning_representation']) for dp in batch]\n references = [item['human_reference'] for item in batch]\n\n inputs = tokenizer(\n input_strs,\n return_tensors=\"pt\",\n padding=True,\n truncation=True,\n max_length=512,\n )\n\n output_ids = model_fn.generate(\n input_ids=inputs['input_ids'].to(device),\n attention_mask=inputs['attention_mask'].to(device),\n generation_config=gen_cfg,\n )\n\n output_strs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n output_strs = [\n out.replace(\"Input: {} Output: \".format(dp['meaning_representation']), \"\").strip() for out, dp in\n zip(output_strs, batch)\n ]\n\n if output_path_preds is not None and output_path_refs is not None:\n # Append references to `output_path_refs`\n with open(output_path_refs, \"a\", encoding='utf-8') as f:\n writer = csv.writer(f)\n for refs in references:\n try:\n rw = [[ref] for ref in refs]\n writer.writerows(rw)\n except:\n raise ValueError(\"References must be a list of strings. 
Got refs: {}\".format(refs))\n writer.writerow([])\n num_mrs += 1\n\n with open(output_path_preds, \"a\", encoding='utf-8') as f:\n writer = csv.writer(f)\n for pred in output_strs:\n writer.writerow([pred])\n num_preds += 1\n\n assert num_mrs == num_preds, \"Number of predictions and references must be equal\"\n\n if compute_bleu:\n for output_str, reference in zip(output_strs, references):\n BLEU.append(output_str, reference)\n NIST.append(output_str, reference)\n\n # Remove last newline from `output_path_refs`\n with open(output_path_refs, 'rb+') as f:\n f.seek(0, os.SEEK_END)\n pos = f.tell() - 1\n while pos > 0 and f.read(1) != b\"\\n\":\n pos -= 1\n f.seek(pos, os.SEEK_SET)\n if pos > 0:\n f.seek(pos, os.SEEK_SET)\n f.truncate()\n\n return {\n \"bleu\": BLEU.score() if compute_bleu else None,\n \"nist\": NIST.score() if compute_bleu else None,\n }\n\n\n# https://stackoverflow.com/questions/76465343/huggingface-transformers-model-config-reported-this-is-a-deprecated-strategy-to\n# @hydra.main(version_base=None, config_path=\"./\", config_name=\"configs\")\ndef evaluate_experiment(experiment_root, test_dataset, overwrite=False, min_records=620, all=False):\n experiment_args = load_object(osp.join(experiment_root, \"args.pkl\"))\n model_names = [name for name in os.listdir(experiment_root) if name.startswith(\"model_\") and name.endswith(\".pth\")]\n\n if all:\n print(\"\\t=> Evaluating all {} models\".format(len(model_names)))\n else:\n model_names = [\"model_best.pth\"]\n print(\"\\t=> Evaluating latest model only\".format(len(model_names)))\n\n for model_name in model_names:\n output_path_refs = osp.join(experiment_root, \"test_references.txt\")\n output_filename = model_name.replace(\"model_\", \"test_predictions_\").replace(\".pth\", \".txt\")\n output_path_preds = osp.join(experiment_root, output_filename)\n\n # Check number of lines in output_path_preds\n if osp.exists(output_path_preds) and not overwrite:\n # ~/scratch/rosa/runs/e2e_nlg/e5_l0.0005_b10_f1.0_s512_mTrue_nadamw_mo0.9_w0.01_nalinear_nu500_namgpt2_namenone_r0.5_leepoch_sarandom_cTrue_t0\n with open(output_path_preds, \"r\") as f:\n num_lines = sum(1 for _ in f)\n if num_lines >= min_records:\n print(\n \"\\t=> Skipping evaluation. 
{} already exists and has {} records\".format(\n output_path_preds, num_lines\n ))\n continue\n\n # Define model\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model = AutoModelForCausalLM.from_pretrained(experiment_args['model']['name'])\n tokenizer = AutoTokenizer.from_pretrained(experiment_args['model']['name'])\n tokenizer.pad_token = tokenizer.eos_token\n\n # Factorize & Load pretrained model\n ignore_list = get_ignore_list_e2e(model) if experiment_args['train']['ignore_list'] else None\n cmodel = {\n \"rosa\": pn.RosaNet,\n \"lora\": pn.LoraNet,\n \"ia3\": pn.IA3Net,\n \"none\": lambda x, **kwargs: x\n }[experiment_args['fnmodel']['name'].lower()](\n model, ignore_list=ignore_list, **experiment_args['fnmodel']['params']\n )\n\n print(\"\\t=> Loading model {} ...\".format(model_name))\n print(\"\\t=> Using device {}\".format(device))\n dct_best = torch.load(osp.join(experiment_root, model_name))\n cmodel.load_state_dict(dct_best['model_state_dict'])\n cmodel.to(device)\n\n if experiment_args['dataset']['name'] != \"e2e_nlg\":\n raise NotImplementedError(\"Dataset {} not supported\".format(experiment_args['dataset']['name']))\n\n evaluate_model(\n cmodel,\n test_dataset=test_dataset,\n tokenizer=tokenizer,\n device=device,\n output_path_preds=output_path_preds,\n output_path_refs=output_path_refs,\n compute_bleu=False,\n )\n\n\ndef main(args):\n test_dataset = get_data(args.dataset, args.cache)\n if args.experiment == '':\n experiments = [osp.join(args.root, d) for d in os.listdir(args.root) if osp.isdir(osp.join(args.root, d))]\n for i, experiment in enumerate(experiments):\n print(\"\\n=> [{}/{}] Evaluating experiment {}\".format(i + 1, len(experiments), experiment))\n evaluate_experiment(experiment, test_dataset=test_dataset, all=args.all)\n else:\n print(\"=> Generating predictions for experiment {}\".format(args.experiment))\n evaluate_experiment(args.experiment, test_dataset=test_dataset, all=args.all)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--experiment', type=str, default='', required=False, help='Experiment directory')\n parser.add_argument('-a', '--all', action='store_true', help='Evaluate all model.pth weights')\n parser.add_argument('-r', '--root', type=str, default='', help='Root directory of many experiments')\n parser.add_argument('-d', '--dataset', type=str, default='e2e_nlg', help='Dataset name')\n parser.add_argument('-c', '--cache', type=str, default='/home/mila/m/marawan.gamal/.cache/huggingface',\n help='Dataset cache directory')\n args = parser.parse_args()\n\n assert args.experiment != '' or args.root != '', \"Either experiment or root must be specified\"\n main(args)\n","repo_name":"marawangamal/rosa","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":10648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"34865673486","text":"from django.core.management.base import BaseCommand\nfrom faker import Faker\n\nimport game.models\nfrom game.models import Game, GameActivity\n\n\ndef populate_games(self, N):\n faker = Faker()\n\n for i in range(N):\n status = faker.random_int(min=1, max=3)\n\n if status == game.models.STATUS_SCHEDULED:\n team_one_score = 0\n team_two_score = 0\n winning_team = None\n if status == game.models.STATUS_STARTED:\n team_one_score = faker.random_int(min=1, max=100)\n team_two_score = faker.random_int(min=1, max=100)\n winning_team = None\n if status == 
game.models.STATUS_END:\n team_one_score = faker.random_int(min=1, max=100)\n team_two_score = faker.random_int(min=1, max=100)\n if team_one_score > team_two_score:\n winning_team = i * 2 + 1\n else:\n winning_team = i * 2 + 2\n Game.objects.create_game(\n name='Game ' + str(i),\n team_one=i * 2 + 1,\n team_two=i * 2 + 2,\n team_one_score=team_one_score,\n team_two_score=team_two_score,\n winning_team_id=winning_team,\n round=1,\n status=status\n )\n pass\n\n\ndef populate_game_activities(self, N):\n faker = Faker()\n\n for i in range(N):\n GameActivity.objects.create_game_activity(\n game_id=i % 8 + 1,\n player_id=faker.random_int(min=1, max=160),\n team_id=faker.random_int(min=1, max=16),\n activity=faker.random_int(min=0, max=1),\n )\n pass\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n populate_games(self, N=8)\n populate_game_activities(self, N=300)\n\n self.stdout.write(self.style.SUCCESS('Successful'))\n","repo_name":"HeshaniR/matific-lms","sub_path":"game/management/commands/gamefaker.py","file_name":"gamefaker.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"4843358285","text":"\"\"\"\nМладшая сестра Фёдора - Соня пишет сочинение по литературе и отправляет файлы учительнице.\n\nФёдор знает, что Соня никогда не ставит заглавные буквы, так как для набора текста использует компьютер. Пока никто\nне видит, Фёдор решил внести исправления в сочинение сестры и написал функцию capitalize(s), которая принимает на\nвход исходную строку s и возвращает строку с восстановленными заглавными буквами.\n\nФункция работает по следующему алгоритму:\n* сделать заглавной первую букву в строке, не считая пробелы;\n* сделать заглавной первую букву после точки, восклицательного или вопросительного знака, не считая пробелы.\n\nПример\n\nВвод\nна этом заканчиваю свое сочинение. поставьте пятерку, Мария Ивановна! я очень старалась!\n\nВывод\nНа этом заканчиваю свое сочинение. Поставьте пятерку, Мария Ивановна! Я очень старалась!\n\"\"\"\n\n\ndef capitalize(text):\n for sign in ('.', '!', '?'):\n text = text.replace(sign, sign + ' ')\n # to_list = text.replace('.', '. ').replace('!', '! ').replace('?', '? ').split(' ')\n return ' '.join(map(lambda s: s.strip()[:1].upper() + s.strip()[1:], text.split(' ')))\n\n\n# Прошел только этот вариант (((\n\n# def capitalize(text):\n# text = list(text)\n# text[0] = text[0].upper()\n# for i in range(len(text) - 2):\n# if text[i] in '!.?':\n# text[i + 2] = text[i + 2].upper()\n#\n# return ''.join(text)\n\n\n# test\nprint(capitalize('на этом заканчиваю свое сочинение. оставьте пятерку, Мария Ивановна! я очень старалась! '))\n","repo_name":"alekgs/python_progs","sub_path":"Class_2023_Тимофей/5. final-1.5.py","file_name":"5. 
final-1.5.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"43308902912","text":"#!/bin/python\n\n# script to compute PDFs of the attitude angles and the heave of the RV-Meteor, LIMRAD94 and the Arduino during Eurec4a\n# for different time resolutions\n# new feature: plot time series of heave, pitch and roll data\n\n# import libraries\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport os\nimport glob\nimport re\nimport sys\n# just needed to find pyLARDA from this location\nsys.path.append('/projekt1/remsens/work/jroettenbacher/Base/larda/')\nsys.path.append('.')\n\nimport pyLARDA\nimport pyLARDA.helpers as h\nimport datetime\nimport logging\n\nlog = logging.getLogger('pyLARDA')\nlog.setLevel(logging.WARNING)\nlog.addHandler(logging.StreamHandler())\n\n# Load LARDA\nlarda = pyLARDA.LARDA().connect('eurec4a', build_lists=True)\n\n# local paths JR\n# input_path = \"./data\"\n# output_path = \"./plots\"\n\n# # Catalpa Paths\n# input_path = \"/home/remsens/data\"\n# output_path = \"/home/remsens/code/larda3/scripts/plots/PDFs_attitude_heave/\"\n\n# Catalpa Paths from JR Laptop\ninput_path = \"/projekt2/remsens/data/campaigns/eurec4a\"\noutput_path = \"/projekt1/remsens/work/jroettenbacher/plots/attitude\"\n\n# define date range, only works for one day at the time\nbegin_dt = datetime.datetime(2020, 2, 6, 0, 0, 0)\nend_dt = datetime.datetime(2020, 2, 10, 23, 59, 59)\n\n########################################################################################################################\n# Data Read In\n########################################################################################################################\n# RV-Meteor\n# list all files in directory, select the date range, then read them in and concat them\nall_files = sorted(glob.glob(os.path.join(input_path + \"/RV-METEOR_DSHIP/*_10Hz.dat\")))\nfile_list = []\nfor f in all_files:\n # match anything (.*) and the date group (?P<date>) consisting of 8 digits (\\d{8})\n match = re.match(r\".*(?P<date>\\d{8})\", f)\n # convert string to datetime\n date_from_file = datetime.datetime.strptime(match.group('date'), '%Y%m%d')\n if begin_dt <= date_from_file <= end_dt:\n file_list.append(f)\nrv_meteor = pd.concat(pd.read_csv(f, encoding='windows-1252', sep=\"\\t\", skiprows=(1, 2), index_col='date time')\n for f in file_list)\nrv_meteor.index = pd.to_datetime(rv_meteor.index, infer_datetime_format=True)\nrv_meteor.index.name = 'datetime'\nrv_meteor.columns = ['Heading [°]', 'Heave [m]', 'Pitch [°]', 'Roll [°]']\n\n# Action Log, read in action log of CTD actions\n# file_list = sorted(glob.glob(os.path.join(input_path + r\"DSHIP_RV-METEOR/*_RV-Meteor_device_action_log.dat\")))\n# rv_meteor_action = pd.concat((pd.read_csv(f, encoding='windows-1252', sep=\"\\t\")\n# for f in file_list), join='outer')\n# rv_meteor_action = pd.read_csv(f\"{input_path}DSHIP_RV-METEOR/{begin_dt:%Y%m%d}_RV-Meteor_device_action_log.dat\",\n# encoding='windows-1252', sep='\\t')\n# rv_meteor_action[\"Date/Time (Start)\"] = pd.to_datetime(rv_meteor_action[\"Date/Time (Start)\"],\n# infer_datetime_format=True)\n# rv_meteor_action = rv_meteor_action.loc[begin_dt:end_dt]\n\n########################################################################################################################\n# LIMRAD94\n\nradar_roll = 
larda.read(\"LIMRAD94_cn_input\", \"Inc_El\", [begin_dt, end_dt])\nradar_pitch = larda.read(\"LIMRAD94_cn_input\", \"Inc_ElA\", [begin_dt, end_dt])\n\n########################################################################################################################\n# Arduino\n# read in measurement data\nall_files = sorted(glob.glob(os.path.join(input_path + \"/ARDUINO/final_daily_files/*_attitude_arduino.csv\")))\nfile_list = []\nfor f in all_files:\n # match anything (.*) and the date group (?P<date>) consisting of 8 digits (\\d{8})\n match = re.match(r\".*(?P<date>\\d{8}).*\", f)\n # convert string to datetime\n date_from_file = datetime.datetime.strptime(match.group('date'), '%Y%m%d')\n if begin_dt <= date_from_file <= end_dt:\n file_list.append(f)\narduino = pd.concat(pd.read_csv(f, encoding='windows-1252', sep=\",\", usecols=[1, 2, 3, 4], index_col='datetime')\n for f in file_list)\narduino.index = pd.to_datetime(arduino.index, infer_datetime_format=True)\narduino.index.name = \"datetime\"\n# rename columns\narduino.columns = ['Heading [°]', 'Pitch [°]', 'Roll [°]']\n\n########################################################################################################################\n# PDF Plotting\n########################################################################################################################\n# RV-Meteor\n# variables = [\"Heave [m]\", \"Pitch [°]\", \"Roll [°]\"]\n# # subsample data by 0.5, 1 and 3 seconds\n# rv_meteor_500ms = rv_meteor.resample(\"0.5S\").mean()\n# rv_meteor_1s = rv_meteor.resample(\"1S\").mean()\n# rv_meteor_3s = rv_meteor.resample(\"3S\").mean()\n# plot PDFs for each variable\n# plt.style.use('ggplot')\n# plt.rcParams.update({'font.size': 16, 'figure.figsize': (16, 9)})\n# for var in variables:\n# # plot PDF, 40 bins\n# plt.hist([rv_meteor_3s[f'{var}'], rv_meteor_1s[f'{var}'], rv_meteor_500ms[f'{var}'], rv_meteor[f\"{var}\"]],\n# bins=40, density=True, histtype='bar', log=True, label=[\"3s\", \"1s\", \"0.5s\", \"0.1s\"])\n# plt.legend(title=\"Sampling Frequency\")\n# plt.xlabel(f\"{var}\")\n# plt.ylabel(\"Probability Density\")\n# plt.title(f\"Probability Density Function of {var[:-4]} Motion - DSHIP\\n\"\n# f\" EUREC4A RV-Meteor {begin_dt:%Y-%m-%d} - {end_dt:%Y-%m-%d}\")\n# plt.tight_layout()\n# # plt.show()\n# filename = f\"{output_path}RV-Meteor_Seapath_{var[:-4]}_PDF_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}_log.png\"\n# plt.savefig(filename, dpi=300)\n# plt.close()\n# print(f\"Figure saved to {filename}\")\n########################################################################################################################\n# LIMRAD94\n# variables = [radar_roll, radar_pitch]\n# names = [\"Roll\", \"Pitch\"]\n# for var, name in zip(variables, names):\n# # plot PDF, 40 bins\n# plt.hist(var['var'], bins=40, density=True, histtype='bar', log=True, label=\"Median: 1.88s\")\n# plt.legend(title=\"Sampling Frequency\")\n# plt.xlabel(f\"{name}\")\n# plt.ylabel(\"Probability Density\")\n# plt.title(f\"Probability Density Function of {name} Motion - LIMRAD94\\n\"\n# f\" EUREC4A RV-Meteor {begin_dt:%Y-%m-%d} - {end_dt:%Y-%m-%d}\")\n# plt.tight_layout()\n# # plt.show()\n# filename = f\"{output_path}RV-Meteor_LIMRAD94_{name}_PDF_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}_log.png\"\n# plt.savefig(filename, dpi=300)\n# plt.close()\n# print(f\"Figure saved to {filename}\")\n\n# plot time series of roll and pitch\n\n# fig, axs = plt.subplots(2, 1, sharex=True)\n# axs[0].plot(radar_roll['var'], label='Roll')\n# axs[1].plot(radar_pitch['var'], 
label='Pitch')\n# axs[0].set_ylabel(\"Roll [°]\")\n# axs[1].set_ylabel(\"Pitch [°]\")\n# axs[1].set_xlabel(\"Datetime [UTC]\")\n# title = \"LIMRAD94 Roll and Pitch Motion\\n EUREC4A RV-Meteor \"\n# if end_dt.date() > begin_dt.date():\n# axs[0].set_title(f\"{title}{begin_dt:%Y-%m-%d} - {end_dt:%Y-%m-%d}\")\n# filename = f\"{output_path}RV-Meteor_LIMRAD94_roll-pitch_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}.png\"\n# fig.savefig(filename, dpi=250)\n# print(f\"{filename} saved to {output_path}\")\n# else:\n# axs[0].set_title(f\"{title}{begin_dt:%Y-%m-%d}\")\n# filename = f\"{output_path}RV-Meteor_LIMRAD94_roll-pitch_{begin_dt:%Y%m%d}.png\"\n# fig.savefig(filename, dpi=250)\n# print(f\"{filename} saved to {output_path}\")\n# fig.close()\n########################################################################################################################\n# ARDUINO attitude sensor\n# variables = [\"Pitch [°]\", \"Roll [°]\"]\n# # subsample data by 0.5, 1 and 3 seconds\n# arduino_500ms = arduino.resample(\"0.5S\").mean()\n# arduino_1s = arduino.resample(\"1S\").mean()\n# arduino_3s = arduino.resample(\"3S\").mean()\n# plot PDFs for each variable\n# plt.style.use('ggplot')\n# plt.rcParams.update({'font.size': 16, 'figure.figsize': (16, 9)})\n# for var in variables:\n# # plot PDF, 40 bins\n# plt.hist([arduino_3s[f'{var}'], arduino_1s[f'{var}'], arduino_500ms[f'{var}'], arduino[f\"{var}\"]],\n# bins=40, density=True, histtype='bar', log=True, label=[\"3s\", \"1s\", \"0.5s\", \"0.25s\"])\n# plt.legend(title=\"Sampling Frequency\")\n# plt.xlabel(f\"{var}\")\n# plt.ylabel(\"Probability Density\")\n# plt.title(f\"Probability Density Function of {var[:-4]} Motion - Arduino sensor\\n\"\n# f\" EUREC4A RV-Meteor {begin_dt:%Y-%m-%d} - {end_dt:%Y-%m-%d}\")\n# plt.tight_layout()\n# # plt.show()\n# filename = f\"{output_path}RV-Meteor_Arduino_attitude_{var[:-4]}_PDF_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}_log.png\"\n# plt.savefig(filename, dpi=300)\n# plt.close()\n# print(f\"Figure saved to {filename}\")\n########################################################################################################################\n# Plot attitude angles\n########################################################################################################################\n# RV-Meteor\n\n# set date range to plot\n# begin_dt = datetime.datetime(2020, 1, 31, 0, 0, 0)\n# end_dt = datetime.datetime(2020, 2, 2, 23, 59, 59)\n# use 3 second averaged data to decrease number of points to plot, drop Heading\n# rv_meteor_plot = rv_meteor_3s.drop('Heading [°]', axis=1).loc[begin_dt:end_dt]\n# subset ctd data\n# ctd_plot = rv_meteor_action.loc[begin_dt:end_dt]\n\n# plot style options\nplt.style.use('ggplot')\n# plt.rcParams.update({'figure.figsize': (11/2.54, 3.5/2.54)})\n# set date tick locator and formater\nhfmt = mdates.DateFormatter('%d/%m/%y %H')\n\n########################################################################################################################\n# plot heave, pitch and roll RV-Meteor\n# fig, axs = plt.subplots(3, 1, sharex=True)\n# axs[0].plot(rv_meteor_plot['Heave [m]'], 'r', label='Heave')\n# axs[0].set_title(f\"Heave, Pitch and Roll measured by the Seapath\\n \"\n# f\"EUREC4A - RV-Meteor {begin_dt:%Y-%m-%d} - {end_dt:%Y-%m-%d}\")\n# axs[0].set_ylabel(\"Heave [m]\")\n# axs[1].plot(rv_meteor_plot['Pitch [°]'], 'b', label='Pitch')\n# axs[1].set_ylabel(\"Pitch [°]\")\n# axs[2].plot(rv_meteor_plot['Roll [°]'], 'g', label='Roll')\n# axs[2].set_ylabel(\"Roll [°]\")\n# axs[2].set_xlabel(\"Datetime [UTC]\")\n# 
axs[2].xaxis.set_major_formatter(hfmt)\n# fig.autofmt_xdate()\n# # plt.show()\n# if end_dt.date() > begin_dt.date():\n# plt.savefig(f\"{output_path}/RV-Meteor_seapath_heave-pitch-roll_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}.png\", dpi=250)\n# print(f\"Saved RV-Meteor_seapath_heave-pitch-roll_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}.png to {output_path}\")\n# else:\n# plt.savefig(f\"{output_path}/RV-Meteor_seapath_heave-pitch-roll_{begin_dt:%Y%m%d}.png\", dpi=250)\n# print(f\"Saved RV-Meteor_seapath_heave-pitch-roll_{begin_dt:%Y%m%d}.png to {output_path}\")\n\n########################################################################################################################\n# plot only Roll RV-Meteor\n\n# fig, ax = plt.subplots()\n# ax.set_title(f\"Roll Motion measured by the Seapath\\n \"\n# f\"EUREC4A - RV-Meteor {begin_dt:%Y-%m-%d} - {end_dt:%Y-%m-%d}\")\n# ax.plot(rv_meteor_plot['Roll [°]'], 'g', label='Roll')\n# ax.set_ylabel(\"Roll [°]\")\n# ax.set_xlabel(\"Datetime [UTC]\")\n# ax.xaxis.set_major_formatter(hfmt)\n# # add a black vertical line when a CTD started\n# # get y limits\n# y_lims = ax.get_ylim()\n# ax.vlines(ctd_plot.loc[ctd_plot['Action'] == 'in the water'].index, ymin=y_lims[0], ymax=y_lims[1],\n# color='k', lw=3, label=\"Start of CTD\")\n# # add a red vertical line when a CTD ended\n# ax.vlines(ctd_plot.loc[ctd_plot['Action'] == 'on deck'].index, ymin=y_lims[0], ymax=y_lims[1],\n# color='r', lw=3, label=\"End of CTD\")\n# ax.legend()\n# fig.autofmt_xdate()\n# # plt.show()\n# if end_dt.date() > begin_dt.date():\n# plt.savefig(f\"{output_path}/RV-Meteor_seapath_roll_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}.png\", dpi=250)\n# print(f\"Saved RV-Meteor_seapath_roll_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}.png to {output_path}\")\n# else:\n# plt.savefig(f\"{output_path}/RV-Meteor_seapath_roll_{begin_dt:%Y%m%d}.png\", dpi=250)\n# print(f\"Saved RV-Meteor_seapath_roll_{begin_dt:%Y%m%d}.png to {output_path}\")\n\n########################################################################################################################\n# plot all three instrument measurements in one plot\nvariables = [\"Roll [°]\", \"Pitch [°]\"]\nfor var, radar_var in zip(variables, [radar_roll, radar_pitch]):\n fig, ax = plt.subplots()\n ax.set_title(f\"{var[:-4]} Motion \\n \"\n f\"EUREC4A - RV-Meteor {begin_dt:%Y-%m-%d} - {end_dt:%Y-%m-%d}\")\n ax.plot(rv_meteor[var], 'g', label='Seapath 10Hz')\n ax.plot(arduino[var], 'b', label='Arduino 4Hz')\n ax.plot(pd.Series(radar_var['var'], index=[h.ts_to_dt(ts) for ts in radar_var['ts']]), 'r', label='LIMRAD94 0.5Hz')\n ax.set_ylabel(var)\n ax.set_xlabel(\"Datetime [UTC]\")\n ax.xaxis.set_major_formatter(hfmt)\n ax.legend(title='Instrument')\n fig.autofmt_xdate()\n if end_dt.date() > begin_dt.date():\n filename = f\"{output_path}/RV-Meteor_{var[:-4]}_comparison_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}.png\"\n plt.savefig(filename, dpi=250)\n print(f\"Saved {filename} to {output_path}\")\n else:\n filename = f\"{output_path}/RV-Meteor_{var[:-4]}_comparison_{begin_dt:%Y%m%d}.png\"\n plt.savefig(filename, dpi=250)\n print(f\"Saved {filename} to {output_path}\")\n\n########################################################################################################################\n# PDF of all three instruments\n# variables = [\"Roll [°]\", \"Pitch [°]\"]\n# for var, radar_var in zip(variables, [radar_roll, radar_pitch]):\n# # plot PDF, 40 bins\n# plt.hist([rv_meteor[var], arduino[var], radar_var['var']],\n# bins=40, density=True, histtype='bar', log=True, label=[\"Seapath 10Hz\", 
\"Arduino 4Hz\", \"LIMRAD94 0.5Hz\"])\n# plt.legend(title=\"Instrument\")\n# plt.xlabel(var)\n# plt.ylabel(\"Probability Density\")\n# title = f\"Probability Density Function of {var[:-6]} Motion\\n EUREC4A RV-Meteor \"\n# if end_dt.date() > begin_dt.date():\n# plt.title(f\"{title}{begin_dt:%Y-%m-%d} - {end_dt:%Y-%m-%d}\")\n# plt.tight_layout()\n# filename = f\"{output_path}RV-Meteor_all_{var[:-6]}_PDF_{begin_dt:%Y%m%d}-{end_dt:%Y%m%d}_log.png\"\n# plt.savefig(filename, dpi=250)\n# print(f\"{filename} saved to {output_path}\")\n# else:\n# plt.title(f\"{title}{begin_dt:%Y-%m-%d}\")\n# plt.tight_layout()\n# filename = f\"{output_path}RV-Meteor_all_{var[:-6]}_PDF_{begin_dt:%Y%m%d}_log.png\"\n# plt.savefig(filename, dpi=250)\n# print(f\"{filename} saved to {output_path}\")\n# plt.close()\n","repo_name":"jroettenbacher/Base","sub_path":"attitude_stat.py","file_name":"attitude_stat.py","file_ext":"py","file_size_in_byte":14988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"36322467331","text":"# coding=utf-8\n\nfrom sandcrawler.scraper import ScraperBase\nfrom sandcrawler.scraper import SimpleScraperBase, ScraperParseException\n\n\nclass MegaFilmesOnlineNet(SimpleScraperBase):\n BASE_URL = 'https://megafilmesonline.net'\n OTHER_URLS = ['http://megafilmesonline.net']\n LONG_SEARCH_RESULT_KEYWORD = 'the'\n\n def setup(self):\n self.register_scraper_type(ScraperBase.SCRAPER_TYPE_OSP)\n self.search_term_language = \"por\"\n raise NotImplementedError('The website is out of reach')\n\n self.register_media(ScraperBase.MEDIA_TYPE_FILM)\n\n self.register_url(\n ScraperBase.URL_TYPE_SEARCH,\n self.BASE_URL)\n\n self.register_url(\n ScraperBase.URL_TYPE_LISTING,\n self.BASE_URL)\n\n def _fetch_search_url(self, search_term, media_type):\n return self.BASE_URL + '?s=' + self.util.quote(search_term)\n\n def _fetch_no_results_text(self):\n return u'Desculpe, a página que você procura não pode ser encontrada.'\n\n def _fetch_next_button(self, soup):\n link = soup.select_one('a.nextpostslink')\n if link:\n return link['href']\n return None\n\n def _parse_search_result_page(self, soup):\n for result in soup.select('div#lista-posts div.box-video div.content a.thumb'):\n self.submit_search_result(\n link_url=result['href'],\n link_title=result['title'],\n image=self.util.find_image_src_or_none(soup, 'img')\n )\n\n\n\n def _parse_parse_page(self, soup):\n MARKER = 'http://www.megafilmesonline.net/ad/?video='\n PROTECTOR_MARKER = '/protetor/?video='\n index_page_title = self.util.get_page_title(soup)\n\n for iframe in soup.select('div.servers iframe'):\n # Often uses a local landing page eg:\n # http://www.megafilmesonline.net/ad/?video=http://www.ok.ru/videoembed/27087735410\n # Check to make sure it matches, then submit\n src = iframe.get('src', iframe.get('data-src', None))\n if not src:\n raise ScraperParseException('Could not find iframe src: %s' % iframe)\n\n src = src.replace('/protetor/?video=', '').replace('/ads/?video=', '')\n if src.startswith(MARKER):\n url = src[len(MARKER):]\n url = url[len(PROTECTOR_MARKER):] if url.startswith(PROTECTOR_MARKER) else url\n self.submit_parse_result(\n index_page_title=index_page_title,\n link_url=url,\n )\n else:\n # Otherwise, just submit that link.\n self.submit_parse_result(\n index_page_title=index_page_title,\n link_url=src,\n 
)\n","repo_name":"realchief/Scraping_BeautifulSoup_phantomjs","sub_path":"scrapers/megafilmesonline_net.py","file_name":"megafilmesonline_net.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"9826585979","text":"from pygame.sprite import Sprite\nfrom entity import Entity\nfrom motion import Motion\nfrom factory.pacman_image_factory import PacmanImageFactory\nfrom pacman_anim.lose_animator import LoseAnimator\nfrom pacman_anim.walk_animator import WalkAnimator\nfrom pygame.sprite import spritecollide\nfrom pygame.sprite import collide_rect\nfrom energizer import Energizer\nfrom sound_manager import SoundManager\n\n\nclass Pacman(Sprite):\n def __init__(self, pos, platforms, dots, ghosts, *groups):\n super().__init__(*groups)\n self.factory = PacmanImageFactory()\n self.factory.create()\n self.animator = WalkAnimator(self.factory)\n self.lose_animator = LoseAnimator(self.factory)\n self.image = self.animator.next(0)\n self.rect = self.image.get_rect(center=pos)\n self.col = Entity(self.rect.x+2, self.rect.y+2, 8, 8, (255, 0, 0))\n self.speed = 2\n self.motion = Motion(self.col, self.speed, platforms)\n self.motion.set_direction(Motion.LEFT)\n self.dots = dots\n self.alive = True\n self.ghosts = ghosts\n self.sound = SoundManager()\n self.dots_for_sound = 0\n\n def update(self, time):\n self.check_ghost_collide()\n if self.alive:\n self.move(time)\n else:\n self.lose()\n\n def move(self, time):\n '''\n Update pacman position\n '''\n self.motion.update()\n self.rect.center = self.col.rect.center\n self.eat_dots(time)\n '''\n x = (right / 8)\n y = (bottom / 8) - 3\n t = ((y - 1) * 28) + x\n '''\n '''\n Reset velocity and direction when\n pacman collides for the new user\n input direction\n '''\n if self.motion.collide_x:\n self.motion.reset_x()\n self.motion.reset_dir()\n if self.motion.collide_y or self.motion.teleporting:\n self.motion.reset_y()\n self.motion.reset_dir()\n\n '''\n Check where pacman is facing and draw sprite image\n '''\n self.motion.check_current_direction()\n if not self.motion.is_stopped():\n self.image = self.animator.next(self.motion.current_dir)\n\n def lose(self):\n self.image = self.lose_animator.next()\n\n def lose_completed(self):\n return self.lose_animator.ended()\n\n def eat_dots(self, time):\n dots = spritecollide(self.col, self.dots, True)\n if (len(dots) > 0):\n dot = dots[0]\n self.dots_for_sound += 1\n if self.dots_for_sound >= 2:\n # self.sound.play_munch(time)\n self.dots_for_sound = 0\n if isinstance(dot, Energizer):\n print(\"BIG!!!!\")\n pass\n\n def set_direction(self, dir):\n self.motion.set_direction(dir)\n\n def check_ghost_collide(self):\n if not self.alive:\n return\n for g in self.ghosts:\n if collide_rect(self.col, g.col):\n self.alive = False\n return\n\n def kill(self):\n self.col.kill()\n super().kill()\n","repo_name":"akDeveloper/80s-pacman","sub_path":"pacman.py","file_name":"pacman.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39981776419","text":"from collections import deque\r\n\r\ndi = [0, 1, 0, -1]\r\ndj = [1, 0, -1, 0]\r\n\r\ndef bfs(si, sj):\r\n q = deque()\r\n q.append((si, sj))\r\n visited[si][sj] = 1\r\n\r\n while q:\r\n ti, tj = q.popleft()\r\n\r\n for k in range(4):\r\n ni = ti + di[k]\r\n nj = tj + dj[k]\r\n\r\n if 0 > ni or N <= ni or 0 > nj or M <= nj:\r\n continue\r\n \r\n if arr[ni][nj] != 0 and visited[ni][nj] 
== 0:\r\n visited[ni][nj] = 1\r\n q.append((ni, nj))\r\n\r\n elif arr[ni][nj] == 0:\r\n count[ti][tj] += 1\r\n\r\nN, M = map(int, input().split())\r\narr = [list(map(int, input().split())) for _ in range(N)]\r\n\r\nday = 0\r\n\r\nwhile True:\r\n visited = [[0 for _ in range(M)] for _ in range(N)]\r\n count = [[0 for _ in range(M)] for _ in range(N)]\r\n cnt = 0\r\n\r\n for i in range(N):\r\n for j in range(M):\r\n if arr[i][j] != 0 and visited[i][j] == 0:\r\n cnt += 1\r\n bfs(i, j)\r\n\r\n for i in range(N):\r\n for j in range(M):\r\n if count[i][j] != 0:\r\n arr[i][j] = max(0, arr[i][j] - count[i][j])\r\n \r\n if cnt == 0:\r\n print(0)\r\n break\r\n \r\n if cnt >= 2:\r\n print(day)\r\n break\r\n \r\n day += 1","repo_name":"yhj12345/TIL-baekjoon","sub_path":"백준/Gold/2573. 빙산/빙산.py","file_name":"빙산.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24850012704","text":"import sys\ninput = sys.stdin.readline\n\nN = int(input())\n\nrst = 1\nstart = end = 1\nsum = 1\n\nwhile end != N:\n if sum == N:\n rst += 1\n end += 1\n sum += end\n elif sum > N:\n sum -= start\n start += 1\n else:\n end += 1\n sum += end\n\nprint(rst)\n\n\n\n\n","repo_name":"bellringstar/python_study","sub_path":"algorithm/baekjoon/2018 수들의 합.py","file_name":"2018 수들의 합.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42708269038","text":"from kzpy3.vis import *\nfrom kzpy3.teg1.rosbag_work.get_data_from_bag_files4 import *\nimport cv2\npath = '/media/your_computer/rosbags/bair_car_data'\npath = '/home/karlzipser/Desktop/Old_Desktop/bair_car_rescue/bair_car_data'\n#path = opjD('temp_bags_saved')\npath = '/home/karlzipser/Desktop/bair_car_data_6_min'\npath = opjD('bair_car_data_min')\npath = sys.argv[1]\nnum_bag_files_to_sample_from_given_run = 1\nnum_samples_from_given_bag_file = 1\n\nnum_frames = 32\n\ntopics = ['state','encoder','steer','motor','gyro','acc','left_right'] # images not included in this list\n\ndata_object = Bair_Car_Data(path, num_bag_files_to_sample_from_given_run, num_samples_from_given_bag_file, validation_set_flag=False)\nplt.ion()\n\ndef apply_rect_to_img(img,value,min_val,max_val,pos_color,neg_color,rel_bar_height,rel_bar_thickness,center=False,reverse=False):\n h,w,d = shape(img)\n p = (value - min_val) / (max_val - 1.0*min_val)\n if reverse:\n p = 1.0 - p\n if p > 1:\n p = 1\n if p < 0:\n p = 0\n wp = int(p*w)\n bh = int((1-rel_bar_height) * h)\n bt = int(rel_bar_thickness * h)\n \n if center:\n if wp < w/2:\n img[(bh-bt/2):(bh+bt/2),(wp):(w/2),:] = neg_color\n else:\n img[(bh-bt/2):(bh+bt/2),(w/2):(wp),:] = pos_color\n else:\n img[(bh-bt/2):(bh+bt/2),0:wp,:] = pos_color\n\nimg = np.zeros((94, 168,3),np.uint8)\n\nfor i in range(10000):\n\n a_data_sequence = data_object.get_data(topics, num_frames, num_frames)\n \"\"\"\n plt.figure('acc')\n plt.clf()\n plt.figure('acc')\n\n if not \"NO DATA\" == a_data_sequence['acc'][0]:\n plt.plot(a_data_sequence['acc'])\n plt.plot(a_data_sequence['state'])\n plt.plot(a_data_sequence['gyro'])\n plt.plot(a_data_sequence['motor'])\n plt.plot(np.array(a_data_sequence['steer']))\n plt.pause(0.00001)\n \"\"\"\n for j in range(num_frames):\n im = a_data_sequence['left'][j]\n img[:,:,0] = im\n img[:,:,1] = im\n img[:,:,2] = im\n apply_rect_to_img(img,a_data_sequence['steer'][j],0,99,[255.0,0,0],[0,255,0.0],0.9,0.1,center=True,reverse=True)\n img4 = 
imresize(img,4.0)\n #apply_rect_to_img(img,a_data_sequence['acc'][j][1]-9.8,-20,20,[255.0,0,0],[0,255,0.0],0.8,0.1,center=True,reverse=False)\n #mi(img,'left',img_title=str(j))\n #plt.pause(0.00001)\n cv2.imshow('left',img4.astype('uint8'))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n pass\n time.sleep(0.0003)\n","repo_name":"SaschaHornauer/autonomous-mc-lab","sub_path":"source/network_training/teg2/data/access/example_of_accessing_data2.py","file_name":"example_of_accessing_data2.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72648715189","text":"#!/usr/bin/env python\nimport sys, os.path, fnmatch, re, shlex, subprocess\n\ndef searchMakefile(where, visit, recursive):\n if not os.path.exists(where):\n return False\n if recursive:\n for root, dirname, filenames in os.walk(where):\n for filename in fnmatch.filter(filenames, 'Makefile*'):\n if visit(os.path.join(root, filename)):\n return os.path.join(root, filename)\n else:\n standardMakefile = os.path.join(where, 'Makefile')\n if os.path.isfile(standardMakefile) and visit(standardMakefile):\n return standardMakefile\n for makefile in fnmatch.filter(os.listdir(where), 'Makefile*'):\n if visit(os.path.join(where, makefile)):\n return os.path.join(where, makefile)\n\ndef searchMakefileContainingFilename(where, filename):\n # FIXME: Using only the base name can lead to false positives if two files with the\n # same name have different build options (i.e. appearing in different Makefiles).\n # We should search for the relative path between the pro file and the source file instead.\n findBasename = re.compile('[/\\s]' + re.escape(os.path.basename(filename)))\n def containsSource(makefile):\n for line in open(makefile):\n if findBasename.search(line):\n return True\n return searchMakefile(where, containsSource, recursive = True)\n\ndef isBuildDirOfSource(buildDir, sourceFile):\n # Assume it's a build dir of our file if it contains a Makefile that was\n # generated using a .pro file placed in a parent directory of our source file.\n def checkQmakeHeader(makefile):\n prog = re.compile('# Project:\\s*(.*)')\n for line in open(makefile):\n if not line.startswith('#'):\n return False\n m = prog.match(line)\n if m:\n proFilePath = m.group(1)\n if not os.path.isabs(proFilePath):\n proFprintilePath = os.path.join(os.path.dirname(makefile), proFilePath)\n proFileDir = os.path.realpath(os.path.dirname(proFilePath))\n sourceDir = os.path.realpath(os.path.dirname(sourceFile))\n return sourceDir.startswith(proFileDir)\n return searchMakefile(buildDir, checkQmakeHeader, recursive = False)\n\ndef extractRelevantFlags(flags):\n # Keep flags that affect the preprocessor and could fail the compile.\n relevantFlags = re.findall('-fPIC|-fPIE|-include [^\\s]+', flags)\n return ' '.join(relevantFlags)\n\ndef removeVarRef(flags):\n return re.sub('\\s?\\$\\([^\\)]*\\)', '', flags) if flags else ''\n\ndef getOptions(makefile, sourceFile, onlyRelevant=True):\n makefileDir = os.path.dirname(os.path.realpath(makefile))\n cmdProg = re.compile('^CXX\\s*=\\s*(.*)')\n definesProg = re.compile('^DEFINES\\s*=\\s*(.*)')\n incpathProg = re.compile('^INCPATH\\s*=\\s*(.*)')\n flagsProg = re.compile('^CXXFLAGS\\s*=\\s*(.*)')\n cmd = None\n defines = None\n incpath = None\n relevantFlags = None\n for line in open(makefile):\n cmdMatch = cmdProg.match(line)\n definesMatch = definesProg.match(line)\n incpathMatch = incpathProg.match(line)\n flagsMatch = flagsProg.match(line)\n if 
cmdMatch:\n cmd = cmdMatch.group(1)\n elif definesMatch:\n defines = definesMatch.group(1)\n elif incpathMatch:\n incpath = re.sub('-I([^/])', '-I' + makefileDir + '/\\\\1', incpathMatch.group(1))\n # Explicitely include config.h for WebKit headers which expect their including cpp file to do so.\n if \"/webkit/\" in sourceFile and sourceFile.endswith('.h'):\n incpath += ' -include config.h'\n elif flagsMatch:\n relevantFlags = removeVarRef(flagsMatch.group(1))\n if onlyRelevant:\n relevantFlags = extractRelevantFlags(relevantFlags)\n if cmd != None and defines != None and incpath != None and relevantFlags != None:\n return cmd, shlex.split(relevantFlags + ' ' + defines + ' ' + incpath)\n\ndef makefileForSource(buildDirs, sourceFile):\n makefile = None\n for buildDir in buildDirs:\n # Heuristic to fast skip build dirs unrelated to this source file.\n if not isBuildDirOfSource(buildDir, sourceFile):\n continue\n makefile = searchMakefileContainingFilename(buildDir, sourceFile)\n if makefile:\n break\n if not makefile:\n sys.stderr.write('Options for [%s] could not be found. Make sure that you have an existing build.\\n' % sourceFile)\n sys.exit(-1)\n return makefile\n\ndef sublimeClangArguments(makefile, sourceFile):\n # Extract the options from the makefile that lists our source file somewhere as a dependency.\n cmd, options = getOptions(makefile, sourceFile)\n print(' '.join(options))\n\ndef build(makefile, sourceFile):\n # Extract the options from the makefile that lists our source file somewhere as a dependency.\n cmd, args = getOptions(makefile, sourceFile, onlyRelevant=False)\n args += ('-fsyntax-only', sourceFile)\n sys.exit(subprocess.call([cmd,] + args))\n\nif __name__ == \"__main__\":\n if sys.argv[1] != '-b':\n buildDirs = sys.argv[1:-1]\n sourceFile = sys.argv[-1]\n sublimeClangArguments(makefileForSource(buildDirs, sourceFile), sourceFile)\n else:\n buildDirs = sys.argv[2:-1]\n sourceFile = sys.argv[-1]\n build(makefileForSource(buildDirs, sourceFile), sourceFile)\n","repo_name":"gistable/gistable","sub_path":"all-gists/2695384/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"} +{"seq_id":"71458313909","text":"__author__='thiagocastroferreira'\n\nimport sys\nsys.path.append('../')\nimport json\nimport paths\nimport os\nimport re\n\nfrom nltk.corpus import stopwords\nstop_ = set(stopwords.words('english'))\n\nDATA_PATH=paths.DATA_PATH\nTRAIN_PATH=os.path.join(DATA_PATH, 'trainset.data')\nDEV_PATH=os.path.join(DATA_PATH, 'devset.data')\n\nALIGNMENTS_PATH=paths.ALIGNMENTS_PATH\nif not os.path.exists(ALIGNMENTS_PATH):\n os.mkdir(ALIGNMENTS_PATH)\n\nclass SemevalAlignments():\n def __init__(self, lowercase=True, stop=True, punctuation=True):\n self.lowercase = lowercase\n self.stop = stop\n self.punctuation = punctuation\n\n self.devset = json.load(open(DEV_PATH))\n\n self.trainset = json.load(open(TRAIN_PATH))\n self.prepare()\n\n\n def remove_punctuation(self, tokens):\n return re.sub(r'[\\W]+',' ', ' '.join(tokens)).strip().split()\n\n\n def remove_stopwords(self, tokens):\n return [w for w in tokens if w.lower() not in stop_]\n\n\n def prepare(self):\n corpus = []\n\n for i, q1id in enumerate(self.trainset):\n query = self.trainset[q1id]\n q1 = [w.lower() for w in query['tokens']] if self.lowercase else query['tokens']\n q1 = self.remove_punctuation(q1) if self.punctuation else q1\n q1 = self.remove_stopwords(q1) if self.stop else q1\n q1 = ' 
'.join(q1)\n\n duplicates = query['duplicates']\n for duplicate in duplicates:\n rel_question = duplicate['rel_question']\n q2id = rel_question['id']\n q2 = [w.lower() for w in rel_question['tokens']] if self.lowercase else rel_question['tokens']\n q2 = self.remove_punctuation(q2) if self.punctuation else q2\n q2 = self.remove_stopwords(q2) if self.stop else q2\n q2 = ' '.join(q2)\n\n corpus.append({\n 'source': q1,\n 'target': q2\n })\n corpus.append({\n 'source': q2,\n 'target': q1\n })\n\n for comment in duplicate['rel_comments']:\n q3id = comment['id']\n q3 = [w.lower() for w in comment['tokens']] if self.lowercase else comment['tokens']\n q3 = self.remove_punctuation(q3) if self.punctuation else q3\n q3 = self.remove_stopwords(q3) if self.stop else q3\n if len(q3) == 0:\n q3 = ['eos']\n\n q3 = ' '.join(q3)\n corpus.append({\n 'source': q1,\n 'target': q3\n })\n corpus.append({\n 'source': q3,\n 'target': q1\n })\n\n path = os.path.join(ALIGNMENTS_PATH, 'align')\n if self.lowercase: path += '.lower'\n if self.stop: path += '.stop'\n if self.punctuation: path += '.punct'\n if not os.path.exists(path):\n os.mkdir(path)\n\n with open(os.path.join(path, 'semeval.de'), 'w') as f:\n f.write('\\n'.join(map(lambda x: x['source'], corpus)))\n\n with open(os.path.join(path, 'semeval.en'), 'w') as f:\n f.write('\\n'.join(map(lambda x: x['target'], corpus)))\n\nif __name__ == '__main__':\n SemevalAlignments(lowercase=True, stop=True, punctuation=True)\n SemevalAlignments(lowercase=False, stop=True, punctuation=True)\n SemevalAlignments(lowercase=True, stop=False, punctuation=True)\n SemevalAlignments(lowercase=True, stop=True, punctuation=False)\n SemevalAlignments(lowercase=False, stop=False, punctuation=True)\n SemevalAlignments(lowercase=True, stop=False, punctuation=False)\n SemevalAlignments(lowercase=False, stop=True, punctuation=False)\n SemevalAlignments(lowercase=False, stop=False, punctuation=False)","repo_name":"fkunneman/DiscoSumo","sub_path":"ranlp/semeval/alignments/alignments.py","file_name":"alignments.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"17562165093","text":"from Helpers.test_logger import logger\nfrom Pages.header import HeaderPage\nfrom Pages.result import ResultPage\nfrom Pages.favorite import Favorite\n\n\"\"\"\n\n1. Login to the system\n2. Go to Electronics page\n3. Add random number of products as a favorite\n4. Go to My Account-> Favorite Ads\n5. 
Check that Favorite page contains correct(selected) products\n\n\"\"\"\n\n\ndef test_favorite_items(driver, login):\n    header_page = HeaderPage(driver)\n    result_page = ResultPage(driver)\n    favorite_page = Favorite(driver)\n\n    header_page.click_on_logo()\n    favorite_page.check_favorite_ads()\n    favorite_page.clear_favorites()\n    header_page.click_on_logo()\n    header_page.click_menu_tab()\n    favorite_item_list = result_page.add_to_favorites()\n    favorite_page.check_favorite_ads()\n    my_favorite_list = favorite_page.get_favorite_items()\n    header_page.click_menu_tab()\n    favorite_page.check_favorite_ads()\n    assert sorted(favorite_item_list) == sorted(my_favorite_list),\\\n        logger(\"Result of favorites is incorrect\", error=True)\n    logger(\"The favorite activity is correct \")\n","repo_name":"Hemush1990/Repo_exam","sub_path":"UI_Automation_Exam/Tests/test_2_check_favorite.py","file_name":"test_2_check_favorite.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"142123348","text":"# coding=utf-8\n\nBYTES = 0xFF\n\n\nclass UserDatagramProtocol(object):\n\n    def __init__(self):\n        # The four UDP header fields, two bytes each:\n        # source port, destination port, length, checksum.\n        self.source_port = BYTES * 2\n        self.destination_port = BYTES * 2\n\n        self.length = BYTES * 2\n        self.checksum = BYTES * 2\n\n    @staticmethod\n    def get_port():\n        # IP protocol number assigned to UDP\n        return 17\n","repo_name":"sodaxiaoyao/ErrorSet","sub_path":"NetworkSet/CiscoCertificationTest/Reference_model/OSI/d_Transport/UDP.py","file_name":"UDP.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
{"seq_id":"24088140201","text":"# Tornado is more robust - consider using over Flask if do not need to worry about templates?\n\nimport base64\nimport logging\nimport os\nimport socket\nimport subprocess\nimport sys\nimport uuid\nfrom test import run\n\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport signal\n\nfrom color_grey_conversion import color_to_grey\n\nIMG_FOLDER = os.path.join(os.path.dirname(__file__), \"dataset/val_img\")\nINST_FOLDER = os.path.join(os.path.dirname(__file__), \"dataset/val_inst\")\nLABEL_FOLDER = os.path.join(os.path.dirname(__file__), \"dataset/val_label\")\n\nverbose = ((sys.argv[1] if 1 < len(sys.argv) else \"\")==\"verbose\")\n\nSTATIC_FOLDER = os.path.join(os.path.dirname(__file__), 'static')\n\n# This is where the temp image will go\nEXPORT_LOCATION = \"/tmp\"\n\nSTATIC_IMG_FOLDER = os.path.join(\n    os.path.dirname(__file__),\n    \"img\"\n)\n\n\ndef check_for_dataset_folder():\n    if not os.path.isdir(\"dataset/\"):\n        os.mkdir(\"dataset/\")\n    if not os.path.isdir(\"dataset/val_img\"):\n        os.mkdir(\"dataset/val_img\")\n    if not os.path.isdir(\"dataset/val_inst\"):\n        os.mkdir(\"dataset/val_inst\")\n    if not os.path.isdir(\"dataset/val_label\"):\n        os.mkdir(\"dataset/val_label\")\n\n\ndef parse_static_filepath(filepath):\n    split_filepath = filepath.split('/')\n    while len(split_filepath) > 2:\n        split_filepath.pop(0)\n\n    return '/'.join(split_filepath)\n\n\ndef run_model(filename):\n    \"\"\"Runs the pretrained COCO model\"\"\"\n    # TODO check to see if this goes any faster with GPUS enabled...\n    # TODO make is it so that concurrent users won't mess with eachother :P aka have hashed or something dataset routes...\n    # that will also take a lot of cleaning up...\n    # TODO figure out how to not do this from the command line...\n    return run(verbose=verbose)\n\n\ndef copy_file(old=\"avon.png\", new=\"avon.png\"):\n    command_string = \"cp \" 
+ old + \" \" + new\n subprocess.check_output(command_string.split(\" \"))\n\n\ndef make_processable(greyscale_fname, output_color_file):\n # Inst folder\n ouptut_greyscale_file = INST_FOLDER + \"/\" + greyscale_fname\n\n # Converts the file to greyscale and saves it to the inst folder?\n if verbose:\n print(output_color_file, ouptut_greyscale_file)\n color_to_grey.convert_rgb_image_to_greyscale(\n output_color_file,\n ouptut_greyscale_file\n )\n\n ouptut_greyscale_file_labels = LABEL_FOLDER + \"/\" + greyscale_fname\n\n copy_file(ouptut_greyscale_file, ouptut_greyscale_file_labels)\n\n ouptut_greyscale_file_img = IMG_FOLDER + \"/\" + greyscale_fname\n copy_file(ouptut_greyscale_file, ouptut_greyscale_file_img)\n\n# def export_image(greyscale_fname):\n# current_image_location = EXPORT_LOCATION + \"/\" + greyscale_fname\n# export_image_location = STATIC_IMG_FOLDER + \\\n# \"/\" + uuid.uuid4().hex + greyscale_fname\n# copy_file(current_image_location, export_image_location)\n# return export_image_location\n\n\nclass UploadHandler(tornado.web.RequestHandler):\n def post(self, name=None):\n # TODO Fix this with how we will be getting the file from the front end...\n # TODO change the way that we save the model?\n self.application.logger.info(\"Recieved a file\")\n pic = str(self.request.body)\n # print(pic.split(','))\n base64_string = pic.split(\",\")[1]\n img_data = base64.b64decode(base64_string)\n color_fname = \"color.png\"\n # relative_path = \"img/\" + color_fname\n output_color_file = STATIC_IMG_FOLDER + \"/\" + color_fname\n\n # Writes the color image\n with open(output_color_file, \"wb+\") as out_f:\n out_f.write(img_data)\n\n greyscale_fname = \"greyscale.png\"\n\n make_processable(greyscale_fname, output_color_file)\n\n # We shouldnt need to pass it a string anymore\n export_image_location = run_model(greyscale_fname)\n if verbose:\n print(export_image_location)\n static_image_location = parse_static_filepath(export_image_location)\n if verbose:\n print(static_image_location)\n\n self.write({\n \"result\": \"success\",\n \"location\": static_image_location\n })\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self, name=None): # I *think* name is the sub endpoint?\n # NOTE - if you pass self.write a dictionary, it will automatically write out\n # JSON and set the content type to JSON\n self.render(\"index.html\")\n # Other methods: self.redirect, self.get_argument, self.request.body,\n\n\nclass MainApplication(tornado.web.Application):\n is_closing = False\n\n def signal_handler(self, signum, frame):\n logging.info('exiting...')\n self.is_closing = True\n\n def try_exit(self):\n if self.is_closing:\n tornado.ioloop.IOLoop.instance().stop()\n logging.info('exit success')\n\n def __init__(self, **settings):\n tornado.web.Application.__init__(self, **settings)\n # Add in various member variables here that you want the handlers to be aware of\n # e.g. 
a database client\n\n # Add the handlers here - use regular expressions or hardcoded paths to link the endpoints\n # with handlers?\n self.port = settings.get('port', 80)\n self.address = settings.get('address', \"0.0.0.0\")\n self.ioloop = tornado.ioloop.IOLoop.instance()\n self.logger = logging.getLogger()\n\n # Tie the handlers to the routes here\n self.add_handlers(\".*\", [\n (r\"/\", MainHandler),\n (r\"/upload\", UploadHandler),\n (r\"/img/(.*)\", tornado.web.StaticFileHandler,\n {\"path\": STATIC_IMG_FOLDER}),\n (r\".*/static/(.*)\", tornado.web.StaticFileHandler,\n {\"path\": STATIC_FOLDER})\n ])\n\n def run(self):\n try:\n signal.signal(signal.SIGINT, self.signal_handler)\n self.listen(self.port, self.address)\n tornado.ioloop.PeriodicCallback(self.try_exit, 100).start()\n\n except socket.error as e:\n self.logger.fatal(\"Unable to listen on {}:{} = {}\".format(\n self.address, self.port, e))\n sys.exit(1)\n self.ioloop.start()\n\n\nif __name__ == \"__main__\":\n\n check_for_dataset_folder()\n tornado.options.define(\n \"debug\",\n default=False,\n help=\"Enable debugging mode.\"\n )\n tornado.options.define('port', default=80, help='Port to listen on.')\n host = \"0.0.0.0\"\n if sys.platform == \"win32\":\n host = \"127.0.0.1\"\n tornado.options.define('address', default=host, help='Url')\n\n tornado.options.define('template_path', default=os.path.join(\n os.path.dirname(__file__), \"templates\"), help='Path to templates')\n tornado.options.parse_command_line()\n options = tornado.options.options.as_dict()\n if verbose:\n print(options)\n app = MainApplication(**options)\n app.run()\n","repo_name":"noyoshi/smart-sketch","sub_path":"backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6882,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"94"} +{"seq_id":"71854318068","text":"import sys\nsys.path.append(\"..\")\nfrom src.constants import *\nimport src.wrappers.patch_notes as pn\nimport src.wrappers.platform as pl\nfrom src.utils import help\nfrom src.utils import hero_name_formatter as hnf\n#\n# Param Checks\ntry:\n #\n # Receive script params\n command = sys.argv[1]\n tag = sys.argv[2] if len(sys.argv) > 2 else None\n platform = sys.argv[3] if len(sys.argv) > 3 else 'pc'\n region = sys.argv[4] if len(sys.argv) > 4 else 'eu'\n mode = sys.argv[5] if len(sys.argv) > 5 else 'quickplay'\n hero = sys.argv[6] if len(sys.argv) > 6 else None\n #\nexcept IndexError as e:\n print(\"Invalid Arguments.\\n\" + str(e))\n exit(1)\n#\n# Cleaning Params\ntry:\n command = str.strip(str.lower(command))\n if tag is not None and tag is not ALL:\n tag = tag.replace(\"#\", \"-\")\n #\n if hero is not None:\n hero = hnf.hero_name_formatter(hero)\nexcept Exception as e:\n print(\"Stat retrieval failed. 
Incorrect command/s.\\n\" + str(e))\n exit(1)\n#\n# Call API Wrappers\nif command in HELP:\n help.get_help()\nelif command in PATCH_NOTES:\n if tag is not None and tag in ALL:\n for patchnote in pn.get_patch_notes(platform):\n patchnote.display_api_obj()\n else:\n pn.get_patch_notes(platform)[0].display_api_obj()\nelif command in ACHIEVEMENTS:\n pl.get_achievements(tag, platform, region).display_api_obj()\nelif command in PLATFORMS:\n for platform in pl.get_platforms(tag, platform, region):\n platform.display_api_obj()\nelif command in PROFILES:\n pl.get_profile(tag, platform, region).display_api_obj()\nelif hero is None:\n if command in ALL_HEROES_STATS:\n pl.get_all_heroes_stats(tag, platform, region, mode).display_api_obj()\n elif command in GET_MEDALS:\n pl.get_all_heroes_stats(tag, platform, region, mode).get_medals()\n elif command in GET_KILLS:\n pl.get_all_heroes_stats(tag, platform, region, mode).get_kills()\n elif command in GET_TIME:\n pl.get_all_heroes_stats(tag, platform, region, mode).get_time()\n elif command in GET_GAMES:\n pl.get_all_heroes_stats(tag, platform, region, mode).get_games()\n elif command in GET_OBJECTIVES:\n pl.get_all_heroes_stats(tag, platform, region, mode).get_objectives()\nelif hero is not None:\n if command in ALL_HEROES_STATS:\n pl.get_heroes_stats(tag, hero, platform, region, mode).display_api_obj()\n elif command in GET_MEDALS:\n pl.get_heroes_stats(tag, hero, platform, region, mode).get_medals()\n elif command in GET_KILLS:\n pl.get_heroes_stats(tag, hero, platform, region, mode).get_kills()\n elif command in GET_TIME:\n pl.get_heroes_stats(tag, hero, platform, region, mode).get_time()\n elif command in GET_GAMES:\n pl.get_heroes_stats(tag, hero, platform, region, mode).get_games()\n elif command in GET_OBJECTIVES:\n pl.get_heroes_stats(tag, hero, platform, region, mode).get_objectives()\nelse:\n print(\"Stat retrieval failed. 
Incorrect command/s\")\n exit(1)","repo_name":"eldrad294/Overwatch_API_Wrapper","sub_path":"src/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"19523541070","text":"\"\"\"\n\n0 1 2 3 4\nx y z\nx y\n x y\n x y\n\nn\n2 2 2\n~\n\nIdea 1\nBF\nCheck all places\nTime: O(2^n)\nSpace: ~n to store those indeces\n\nIdea 2\nRecursive\nmoney for nums[0..n-1] = max(\n money for num[1..n-1],\n num[0] + money for num[2..n-1]\n)\nTime: O(2^n) ~ two calls per each element\nWith memoization: O(n)\nSpace: ~n - for stack till last el\n\n\nIdea 3\nIterative\nFill from end\nmoney[num of houses from left]\nmoney[0] = 0\nmoney[1] = nums[0]\nmoney[2] = max(money[2-2] + num[1], money[2-1])\n...\n\nTime: ~n\nSpace: ~n\n\n\"\"\"\n\nclass Solution:\n def rob(self, nums) -> int:\n if not nums:\n return 0\n money = [0, nums[0]]\n for id in range(1, len(nums)):\n money += [max(\n nums[id] + money[id-1],\n money[id]\n )]\n return money[len(nums)]\n\n\"\"\"\nEx1\n1 2 3 1\nid money\n 0\n0 1\n1 2\n2 4 or 2 4\n3 3 or 4 4\n> 4\n\"\"\"\n","repo_name":"artkpv/code-dojo","sub_path":"leetcode.com/problems/house-robber/pr.py","file_name":"pr.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"20716808797","text":"from characters import Character\n\nclass Shadow(Character):\n #Starting health is 1, takes times 10% of the time.\n def __init__(self, health, power):\n self.name = 'Shadow'\n self.health = 1\n self.power = power\n \n def attack(self, hero):\n hero.health -= self.power\n print(\"The shadow does {} damage to you\".format(self.power))\n if(hero.health <= 0):\n print(\"You are dead\")\n","repo_name":"laddeyboy/DC-exercises","sub_path":"python/Week2/RPG/characters/Shadow.py","file_name":"Shadow.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"37983001583","text":"import os\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nimport pandas as pd\nfrom currency_converter import CurrencyConverter\nfrom pathlib import Path\nfrom glob import glob\n\n\nclass History(pd.DataFrame):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @classmethod\n def fromFile(cls, path):\n \"\"\" initializes a History object / pd df from a filesystem path\n \n >>> t = History.fromFile(\"test/merged.csv\")\n \"\"\"\n\n df = History(pd.read_csv(path))\n df['Date/Time'] = pd.to_datetime(df['Date/Time'],\n format='%m/%d/%Y %I:%M %p')\n df['Expiration Date'] = pd.to_datetime(\n df['Expiration Date'], format='%m/%d/%Y')\n df.addEuroConversion()\n df._selfTest()\n df.drop(columns=\"Account Reference\", inplace=True)\n return df\n\n\n def addEuroConversion(self):\n \"\"\" adds a new column called \"AmountEuro\" and \"FeesEuro\" to the dataframe\n\n >>> t = History.fromFile(\"test/merged.csv\")\n >>> t.addEuroConversion()\n >>> \"AmountEuro\" in t.columns\n True\n >>> \"FeesEuro\" in t.columns\n True\n \"\"\"\n c = CurrencyConverter(fallback_on_missing_rate=True,\n fallback_on_wrong_date=True)\n self['Date/Time'] = pd.to_datetime(self['Date/Time'])\n self['Expiration Date'] = pd.to_datetime(self['Expiration Date'])\n self['AmountEuro'] = self.apply(lambda x: c.convert(\n x['Amount'], 'USD', 'EUR', date=x['Date/Time']), axis=1)\n 
self['FeesEuro'] = self.apply(lambda x: c.convert(\n x['Fees'], 'USD', 'EUR', date=x['Date/Time']), axis=1)\n\n def _selfTest(self):\n if \"Date/Time\" not in self.columns:\n raise ValueError(\n \"Couldn't find the first column labeled Date/Time in the csv file\")\n\n if not self[\"Date/Time\"].is_monotonic_decreasing:\n raise ValueError(\n \"The 'Date/Time' column is not monotonically decreasing. We can't sort the file because the timestamps are not unique. Please do it manually.\")\n\n @classmethod\n def _merge(cls, pathIn):\n \"\"\" \n some test code to assemble test data. Merges multiple tastyworks csv files into one and keeps order. Doesn't remove duplicates\n\n ### Warning ### I think I messed up here and this doesn't work. In the end, I merged the files manually\n - the csv files have duplicate entries because the time only has minute resolution\n - you can't sort it because, again, only minute resolution. That leads to close before open\n I've used\n ls -r 20*.csv | tr '\\n' '\\0' |xargs -0 cat > merged2.csv\n in a shell and removed the duplicated headers manually\n\n >>> merged = History._merge([Path(p) for p in glob(str(Path('test/20*.csv').expanduser()))]) # doctest: +SKIP\n >>> merged.to_csv(\"test/temp.csv\", index=None, date_format='%m/%d/%Y %I:%M %p') # doctest: +SKIP\n \"\"\"\n h = []\n for csvfile in pathIn:\n temp = pd.read_csv(csvfile,\n parse_dates=[\"Date/Time\"], index_col=False)\n temp[\"Date/Time\"] = pd.to_datetime(temp['Date/Time'],\n format='%m/%d/%Y %I:%M %p')\n h.append(temp)\n result = pd.concat(h, ignore_index=True)\n result = result.sort_values(\"Date/Time\", ascending=True)\n return result[::-1]\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"avion23/tastyworksTaxes","sub_path":"tastyworksTaxes/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"72599423670","text":"import os\nimport streamlit as st\nimport subprocess\nimport boto3\nimport shutil\nimport requests\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nfrom transformers import ViTImageProcessor, ViTForImageClassification\nfrom PIL import Image\nimport io\n\nload_dotenv()\n\nst.set_page_config(page_title=\"VSCO Analyzer\",\n page_icon=':thought_balloon:', layout='wide')\n\ns3_client = boto3.client('s3')\n\ndef check_vsco_username_exists(vsco_username):\n url = f\"https://vsco.co/{vsco_username}/gallery\"\n response = requests.head(url)\n return response.status_code == 200\n\ndef is_username_processed(vsco_username):\n try:\n response = s3_client.list_objects_v2(Bucket='vsco-scrapper', Prefix='logs_username/')\n for obj in response.get('Contents', []):\n if obj['Key'].startswith('logs_username/'):\n log_key = obj['Key']\n username_in_log = log_key.split('/')[1]\n if username_in_log == vsco_username:\n return True\n return False\n except s3_client.exceptions.NoSuchBucket:\n return False\n\ndef download_and_analyze_vsco_data(vsco_username):\n # Check if the username has already been processed\n if is_username_processed(vsco_username):\n st.info(f\"VSCO username '{vsco_username}' has already been processed. Skipping download.\")\n return\n\n # Download VSCO data using vsco-scraper\n try:\n subprocess.run(f\"vsco-scraper {vsco_username} --getImages --getCollection --getProfile\", shell=True, check=True)\n except subprocess.CalledProcessError:\n st.warning(\"Error occurred during data download. 
Please try again later.\")\n return\n\n # Move the downloaded data to the temp_data directory\n local_path_to_data = f\"./{vsco_username}\"\n temp_data_dir = \"./temp_data\"\n os.makedirs(temp_data_dir, exist_ok=True)\n for root, _, files in os.walk(local_path_to_data):\n for file in files:\n relative_path = os.path.relpath(root, local_path_to_data)\n s3_key = f'{vsco_username}/{relative_path}/{file}'\n s3_client.upload_file(os.path.join(root, file), 'vsco-scrapper', s3_key)\n\n # Remove the temp_data directory to avoid cluttering\n shutil.rmtree(temp_data_dir)\n shutil.rmtree(local_path_to_data)\n\n # AI analysis on images in the journal\n predicted_classes = perform_ai_analysis(vsco_username)\n\n # Display the predicted classes in Streamlit\n if predicted_classes:\n st.subheader(\"Predicted Classes for Images in the Feed:\")\n for i, predicted_class in enumerate(predicted_classes, start=1):\n st.text(f\"Image {i}: {predicted_class}\")\n else:\n st.warning(\"No images found in the journal or AI analysis results.\")\n\n # Update the log file with the processed username\n update_log(vsco_username)\n\n return None \n\ndef fetch_feed_images(vsco_username):\n # Fetch feed images from S3\n feed_images = []\n feed_prefix = f\"{vsco_username}/\"\n try:\n response = s3_client.list_objects_v2(Bucket='vsco-scrapper', Prefix=feed_prefix)\n for obj in response.get('Contents', []):\n image_key = obj['Key']\n # Exclude profile and collection images\n if not image_key.startswith(f\"{vsco_username}/profile/\") and not image_key.startswith(f\"{vsco_username}/collection/\"):\n response = s3_client.get_object(Bucket='vsco-scrapper', Key=image_key)\n image_data = response['Body'].read()\n feed_images.append(image_data)\n except s3_client.exceptions.NoSuchBucket:\n pass\n\n return feed_images\n\ndef perform_ai_analysis(vsco_username):\n feed_images = fetch_feed_images(vsco_username)\n\n predicted_classes = []\n processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')\n model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')\n\n for image_data in feed_images:\n image = Image.open(io.BytesIO(image_data))\n inputs = processor(images=image, return_tensors=\"pt\")\n outputs = model(**inputs)\n logits = outputs.logits\n # model predicts one of the 1000 ImageNet classes\n predicted_class_idx = logits.argmax(-1).item()\n predicted_class = model.config.id2label[predicted_class_idx]\n predicted_classes.append(predicted_class)\n return predicted_classes\n\ndef execute_vsco_scrapper_notebook_script(vsco_username):\n cmd = f\"python script.py {vsco_username}\"\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n return stdout, stderr\n\n# UPDATE AWS S3 USERNAME_LOG\ndef update_log(vsco_username):\n timestamp = datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')\n log_filename = f\"{vsco_username}_{timestamp}.txt\"\n\n try:\n s3_client.put_object(Body=vsco_username, Bucket='vsco-scrapper', Key=f'logs_username/{log_filename}')\n except Exception as e:\n st.warning(\"Failed to update log. 
Please try again later.\")\n print(e)\n\ndef main():\n vsco_username = st.text_input(\"Enter VSCO Username\", \"\")\n # Button to trigger data retrieval and analysis\n if st.button(\"Analyze\"):\n if vsco_username:\n if check_vsco_username_exists(vsco_username):\n try:\n # Download data and perform analysis THIS WORKS\n # download_and_analyze_vsco_data(vsco_username)\n # Execute the Jupyter Notebook script with the username\n stdout, stderr = execute_vsco_scrapper_notebook_script(vsco_username)\n st.success(\"Data processing completed successfully.\")\n except Exception as e:\n st.error(\"An error occurred during data processing. Please try again later.\")\n print(e)\n else:\n st.warning(\"Username not found or doesn't exist. Please make sure to spell it correctly.\")\n else:\n st.warning(\"Please enter a valid VSCO username.\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"mahakanakala/vsco-analytics","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"31218985827","text":"import os\n\nboard = [['-' for i in range(10)] for j in range(10)]\nsnake = [(0,0)]\nsnakehead = (0,0) # current position of the snake's head, updated on every move\nfood = [(1,1),(0,5),(1,4)]\nfor i in food:\n board[i[0]][i[1]] = 'F'\n\nboard[0][0] = 'S'\nprint(*board,sep=\"\\n\")\n\nprint(\"To move the snake, enter a direction (w, a, s, d) and press enter.\")\nwhile True:\n choice = input(\"Enter a direction: \")\n os.system('cls')\n if choice == 'w':\n snakehead = (snakehead[0]-1,snakehead[1])\n snake.pop()\n snake.insert(0,snakehead)\n elif choice == 'a':\n snakehead = (snakehead[0],snakehead[1]-1)\n snake.pop()\n snake.insert(0,snakehead)\n elif choice == 's':\n snakehead = (snakehead[0]+1,snakehead[1])\n snake.pop()\n snake.insert(0,snakehead)\n elif choice == 'd':\n snakehead = (snakehead[0],snakehead[1]+1)\n snake.pop()\n snake.insert(0,snakehead)\n\n if snake[0] in food:\n food.remove(snake[0])\n new = (snake[-1][0],snake[-1][1])\n snake.append(new) # grow the snake by duplicating its tail segment\n\n\n print(*board, sep='\\n')\n\n\n\n\n","repo_name":"ichbinjay/helloProgramming","sub_path":"src/snakeGame.py","file_name":"snakeGame.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"30171720328","text":"budget = float(input())\nstatisti= int(input())\nprice_for_clothes_per_statist = float(input())\n\ndecor = budget * 0.1\n\ntotal_for_clothes = statisti * price_for_clothes_per_statist\n\nif statisti > 150:\n total_for_clothes -= 0.1 * total_for_clothes\n\nexpenses = decor + total_for_clothes\ndiff = abs(budget - expenses)\n\nif expenses > budget:\n print(\"Not enough money!\")\n print(f\"Wingard needs {diff:.2f} leva more.\")\nelse:\n print(\"Action!\")\n print(f\"Wingard starts filming with {diff:.2f} leva left.\")\n","repo_name":"IDaneva/Softuni-Software-Engineering-Python","sub_path":"Programming Basics/2 conditional statements/godzilla_vs_kong.py","file_name":"godzilla_vs_kong.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"27332449668","text":"import collections\nimport os\nimport sys\nif sys.platform.startswith(\"win\"):\n import glob\n\n\nclass GetFunction:\n\n def __init__(self):\n self.cache = {}\n self.not_found_cache = set()\n\n\n def __call__(self, module, function_name):\n function = self.cache.get((module, function_name), None)\n if (function is None and\n (module, function_name) not in 
self.not_found_cache):\n try:\n function = getattr(module, function_name)\n if not callable(function):\n raise AttributeError()\n self.cache[module, function_name] = function\n except AttributeError:\n function = None\n self.not_found_cache.add((module, function_name))\n return function\n\n\ndef main():\n modules = load_modules()\n get_function = GetFunction()\n get_file_type_functions = []\n for module in modules:\n get_file_type = get_function(module, \"get_file_type\")\n if get_file_type is not None:\n get_file_type_functions.append(get_file_type)\n for file in get_files(sys.argv[1:]):\n try:\n with open(file, \"rb\") as fh:\n magic = fh.read(1000)\n for get_file_type in get_file_type_functions:\n filetype = get_file_type(magic,\n os.path.splitext(file)[1])\n if filetype is not None:\n print(\"{0:.<20}{1}\".format(filetype, file))\n break\n else:\n print(\"{0:.<20}{1}\".format(\"Unknown\", file))\n except EnvironmentError as err:\n print(err)\n\n\nif sys.platform.startswith(\"win\"):\n def get_files(names):\n for name in names:\n if os.path.isfile(name):\n yield name\n else:\n for file in glob.iglob(name):\n if not os.path.isfile(file):\n continue\n yield file\nelse:\n def get_files(names):\n return (file for file in names if os.path.isfile(file))\n\n\ndef load_modules():\n modules = []\n for name in os.listdir(os.path.dirname(__file__) or \".\"):\n if name.endswith(\".py\") and \"magic\" in name.lower():\n name = os.path.splitext(name)[0]\n if name.isidentifier() and name not in sys.modules:\n try:\n module = __import__(name)\n modules.append(module)\n except (ImportError, SyntaxError) as err:\n print(err)\n return modules\n\n\nif len(sys.argv) == 1 or sys.argv[1] in {\"-h\", \"--help\"}:\n print(\"usage: {0} file1 [file2 [... 
fileN]]\".format(\n os.path.basename(sys.argv[0])))\n sys.exit(2)\n\nmain()\n","repo_name":"BaneZhang/python","sub_path":"Programming_in_Python3/Examples/magic-numbers_ans.py","file_name":"magic-numbers_ans.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"94"} +{"seq_id":"24054552862","text":"import logging\nimport os.path as osp\n# from utils import os_utils\n\n# tensorflow_logger = logging.getLogger('tensorflow')\n# tensorflow_logger.setLevel(logging.DEBUG)\n#\n# # create formatter and add it to the handlers\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# # create file handler which logs even debug messages\n# fh = logging.FileHandler(os.path.join(config.model_save_path ,'tensorflow.txt'))\n# fh.setLevel(logging.DEBUG)\n# fh.setFormatter(formatter)\n# tensorflow_logger.addHandler(fh)\n\n\n# Copyright (c) 2014 Markus Pointner\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nclass _AnsiColorStreamHandler(logging.StreamHandler):\n DEFAULT = '\\x1b[0m'\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n YELLOW = '\\x1b[33m'\n CYAN = '\\x1b[36m'\n\n CRITICAL = RED\n ERROR = RED\n WARNING = YELLOW\n INFO = DEFAULT # GREEN\n DEBUG = CYAN\n\n @classmethod\n def _get_color(cls, level):\n if level >= logging.CRITICAL: return cls.CRITICAL\n elif level >= logging.ERROR: return cls.ERROR\n elif level >= logging.WARNING: return cls.WARNING\n elif level >= logging.INFO: return cls.INFO\n elif level >= logging.DEBUG: return cls.DEBUG\n else: return cls.DEFAULT\n\n def __init__(self, stream=None):\n logging.StreamHandler.__init__(self, stream)\n\n def format(self, record):\n text = logging.StreamHandler.format(self, record)\n color = self._get_color(record.levelno)\n return (color + text + self.DEFAULT) if self.is_tty() else text\n\n def is_tty(self):\n isatty = getattr(self.stream, 'isatty', None)\n return isatty and isatty()\n\n\nclass _WinColorStreamHandler(logging.StreamHandler):\n # wincon.h\n FOREGROUND_BLACK = 0x0000\n FOREGROUND_BLUE = 0x0001\n FOREGROUND_GREEN = 0x0002\n FOREGROUND_CYAN = 0x0003\n FOREGROUND_RED = 0x0004\n FOREGROUND_MAGENTA = 0x0005\n FOREGROUND_YELLOW = 0x0006\n FOREGROUND_GREY = 0x0007\n FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.\n FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED\n\n BACKGROUND_BLACK = 0x0000\n BACKGROUND_BLUE = 0x0010\n BACKGROUND_GREEN = 0x0020\n BACKGROUND_CYAN = 0x0030\n BACKGROUND_RED = 0x0040\n BACKGROUND_MAGENTA = 0x0050\n BACKGROUND_YELLOW = 0x0060\n BACKGROUND_GREY = 0x0070\n BACKGROUND_INTENSITY = 0x0080 # background color is intensified.\n\n DEFAULT = FOREGROUND_WHITE\n CRITICAL = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY\n ERROR = FOREGROUND_RED | FOREGROUND_INTENSITY\n WARNING = FOREGROUND_YELLOW | FOREGROUND_INTENSITY\n INFO = FOREGROUND_GREEN\n DEBUG = FOREGROUND_CYAN\n\n @classmethod\n def _get_color(cls, level):\n if level >= logging.CRITICAL: return cls.CRITICAL\n elif level >= logging.ERROR: return cls.ERROR\n elif level >= logging.WARNING: return cls.WARNING\n elif level >= logging.INFO: return cls.INFO\n elif level >= logging.DEBUG: return cls.DEBUG\n else: return cls.DEFAULT\n\n def _set_color(self, code):\n import ctypes\n ctypes.windll.kernel32.SetConsoleTextAttribute(self._outhdl, code)\n\n def __init__(self, stream=None):\n logging.StreamHandler.__init__(self, stream)\n # get file handle for the stream\n import ctypes, ctypes.util\n # for some reason find_msvcrt() sometimes doesn't find msvcrt.dll on my system?\n crtname = ctypes.util.find_msvcrt()\n if not crtname:\n crtname = ctypes.util.find_library(\"msvcrt\")\n crtlib = ctypes.cdll.LoadLibrary(crtname)\n self._outhdl = crtlib._get_osfhandle(self.stream.fileno())\n\n def emit(self, record):\n color = self._get_color(record.levelno)\n self._set_color(color)\n logging.StreamHandler.emit(self, record)\n self._set_color(self.FOREGROUND_WHITE)\n\n# select ColorStreamHandler based on platform\nimport platform\nif platform.system() == 'Windows':\n ColorStreamHandler = _WinColorStreamHandler\nelse:\n ColorStreamHandler = _AnsiColorStreamHandler\n\ndef get_logging_dict(name,mode='w'):\n return {\n 'version': 1,\n 
'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n },\n },\n 'handlers': {\n 'stderr': {\n 'level': 'INFO',\n 'formatter': 'standard',\n 'class': 'utils.log_utils.ColorStreamHandler',\n 'stream': 'ext://sys.stderr',\n },\n 'logfile': {\n 'level': 'DEBUG',\n 'formatter': 'standard',\n 'class': 'logging.FileHandler',\n 'filename': name,\n 'mode': mode,\n }\n },\n 'loggers': {\n '': {\n 'handlers': ['stderr', 'logfile'],\n 'level': 'DEBUG',\n 'propagate': True\n },\n\n # extra ones to shut up.\n 'tensorflow': {\n 'handlers': ['stderr', 'logfile'],\n 'level': 'INFO',\n },\n }\n }\n\ndef create_logger(log_file):\n import logging.config\n logging.config.dictConfig(get_logging_dict(log_file))\n filename = osp.basename(log_file)\n logger = logging.getLogger(filename)\n return logger\n\ndef get_logger_by_tag(tag):\n logger = logging.getLogger(tag)\n return logger\n\n\n","repo_name":"ahmdtaha/knowledge_evolution","sub_path":"utils/log_utils.py","file_name":"log_utils.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"94"} +{"seq_id":"73179498230","text":"import os\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom dipgnn.utils.register import registers\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n\r\n@registers.data_container.register(\"data_container\")\r\nclass DataContainer(object):\r\n def __init__(\r\n self,\r\n graph_data_list,\r\n target_data_list,\r\n target_normalizer=None,\r\n target_col=\"targets\",\r\n target_type=\"atom\",\r\n task_type=\"regression\",\r\n ):\r\n self.graph_data_list = np.array(graph_data_list)\r\n\r\n atom_features = graph_data_list[0][\"atom_features_list\"]\r\n if isinstance(atom_features[0], (np.int32, np.int64, int)):\r\n self.atom_feature_scheme = \"specie_onehot\"\r\n elif isinstance(atom_features[0], (list, np.ndarray, tuple, pd.Series)):\r\n self.atom_feature_scheme = \"external\"\r\n self.atom_feature_len = atom_features.shape[-1]\r\n else:\r\n raise ValueError(\"Cannot determine atom_feature_scheme\", atom_features[0])\r\n\r\n self.target_data_list = np.array(target_data_list)\r\n\r\n self.target_type = target_type\r\n self.task_type = task_type\r\n self.target_col = target_col\r\n\r\n if task_type == \"regression\":\r\n if target_normalizer is None:\r\n targets = list()\r\n for target_data in target_data_list:\r\n if target_type == \"structure\":\r\n targets.append(target_data[target_col])\r\n else:\r\n targets += list(target_data[target_col])\r\n self.target_normalizer = StandardScaler().fit(np.array(targets).reshape(-1, 1))\r\n else:\r\n self.target_normalizer = target_normalizer\r\n else:\r\n self.target_normalizer = None\r\n\r\n @classmethod\r\n def from_files(\r\n cls,\r\n graph_data_file_list,\r\n targets_data_file,\r\n target_normalizer=None,\r\n target_col=\"targets\",\r\n target_type=\"atom\",\r\n task_type=\"regression\"\r\n ):\r\n\r\n graph_data_list = list()\r\n\r\n for graph_data_file in graph_data_file_list:\r\n try:\r\n with open(graph_data_file, \"rb\") as f:\r\n graph_data = pickle.load(f)\r\n graph_data_list.append(graph_data)\r\n except Exception:\r\n raise ValueError(\"Please provide paths to the pickled graph_data, \"\r\n \"and make sure the files can be pickle.load\")\r\n\r\n try:\r\n with open(targets_data_file, \"rb\") as f:\r\n target_data_list = pickle.load(f)\r\n except Exception:\r\n raise ValueError(\"Please provide paths to 
the pickled target_data_list, \"\r\n \"and make sure the files can be pickle.load\")\r\n\r\n return cls(graph_data_list=graph_data_list,\r\n target_data_list=target_data_list,\r\n target_normalizer=target_normalizer,\r\n target_type=target_type,\r\n task_type=task_type,\r\n target_col=target_col\r\n )\r\n\r\n @classmethod\r\n def from_structures(\r\n cls,\r\n structure_list=None,\r\n targets_list=None,\r\n source_ids=None,\r\n target_type=\"atom\",\r\n task_type=\"regression\",\r\n neighbor_scheme=\"external\",\r\n neighbor_cutoff=4.0,\r\n external_neighbors_list=None,\r\n bond_distances_list=None,\r\n atom_feature_scheme=\"external\",\r\n external_atom_features_list=None,\r\n specie_to_features=None, # for example, {\"Al1\": 0, \"Sm1\": 1}\r\n output_graph_data=True,\r\n output_targets=True,\r\n save_graph_data_batch_size=1,\r\n output_path=None):\r\n\r\n batch_graph_data_list = list()\r\n total_graph_data_list = list()\r\n total_target_data_list = list()\r\n batch_id = 0\r\n\r\n for structure_id, structure in enumerate(structure_list):\r\n atom_features_list = list()\r\n dist_list = list()\r\n angle_kj_list = list()\r\n angle_im_list = list()\r\n id_i_list = list()\r\n id_j_list = list()\r\n angle_kj_reduce_to_dist_list = list()\r\n angle_im_reduce_to_dist_list = list()\r\n\r\n map_ji_to_dist_id = dict()\r\n dist_kj_expand_to_angle_meta_list = list()\r\n dist_im_expand_to_angle_meta_list = list()\r\n dist_kj_ji_expand_to_angle_meta_list = list()\r\n dist_im_ji_expand_to_angle_meta_list = list()\r\n\r\n dist_id = 0\r\n nn_infos = list()\r\n if neighbor_scheme == \"external\":\r\n for i in range(len(structure)):\r\n i_coords = structure[i].coords\r\n\r\n js = external_neighbors_list[structure_id][i]\r\n\r\n js_to_i_frac = structure.frac_coords[js] - structure[i].frac_coords\r\n js_cross_pbc = np.abs(np.round(js_to_i_frac)).sum(axis=1) > 0\r\n\r\n js_to_i_frac_image = js_to_i_frac - np.round(js_to_i_frac)\r\n js_frac_coords = structure[i].frac_coords + js_to_i_frac_image\r\n\r\n js_coords = np.dot(js_frac_coords, structure.lattice.matrix)\r\n\r\n if bond_distances_list is None:\r\n dists = np.linalg.norm(js_coords - i_coords, axis=1)\r\n else:\r\n dists = bond_distances_list[structure_id][i]\r\n\r\n nn_infos.append({\"nn_ids\": js,\r\n \"nn_coords_image\": js_coords,\r\n \"nn_frac_coords_image\": js_frac_coords,\r\n \"nn_dists\": dists,\r\n \"nn_cross_pbc\": js_cross_pbc,\r\n \"n_neighbors\": len(js)})\r\n\r\n elif neighbor_scheme == \"pmg_cutoff\":\r\n for i in range(len(structure)):\r\n nns = structure.get_neighbors(structure[i], r=neighbor_cutoff)\r\n nn_infos.append({\"nn_ids\": [nn[2] for nn in nns],\r\n \"nn_coords_image\": [nn[0].coords for nn in nns],\r\n \"nn_sites_image\": [nn[0] for nn in nns],\r\n \"nn_dists\": [nn[1] for nn in nns],\r\n \"nn_cross_pbc\": [True if np.abs(nn[3]).sum() != 0 else False for nn in nns],\r\n \"n_neighbors\": len(nns)})\r\n\r\n else:\r\n raise ValueError(\"No support for neighbor_scheme = {}\".format(neighbor_scheme))\r\n\r\n for i in range(len(structure)):\r\n # i's coords and features\r\n i_coords = structure[i].coords\r\n\r\n if atom_feature_scheme == \"external\":\r\n i_feature = external_atom_features_list[structure_id][i]\r\n elif atom_feature_scheme == \"specie_onehot\":\r\n i_feature = specie_to_features[structure.sites[i].species.alphabetical_formula]\r\n else:\r\n raise ValueError(\"No support for atom_feature_scheme = {}\".format(atom_feature_scheme))\r\n\r\n atom_features_list.append(i_feature)\r\n\r\n # iterate neighbors of i, namely j\r\n 
nn_info = nn_infos[i]\r\n for idx in range(nn_info[\"n_neighbors\"]):\r\n j = nn_info[\"nn_ids\"][idx]\r\n j_coords = nn_info[\"nn_coords_image\"][idx]\r\n dist = nn_info[\"nn_dists\"][idx]\r\n j_cross_pbc = nn_info[\"nn_cross_pbc\"][idx]\r\n\r\n if neighbor_scheme == \"external\":\r\n j_frac_coords = nn_info[\"nn_frac_coords_image\"][idx]\r\n\r\n elif neighbor_scheme == \"pmg_cutoff\":\r\n j_site = nn_info[\"nn_sites_image\"][idx]\r\n\r\n else:\r\n raise ValueError(\"No support for neighbor_scheme = {}\".format(neighbor_scheme))\r\n\r\n map_ji_to_dist_id[\"{}_{}\".format(j, i)] = dist_id\r\n\r\n id_i_list.append(i)\r\n id_j_list.append(j)\r\n\r\n dist_list.append(dist)\r\n\r\n # k: neighbors of j\r\n ks = nn_infos[j][\"nn_ids\"]\r\n if j_cross_pbc:\r\n if neighbor_scheme == \"external\":\r\n ks_to_j_frac = structure.frac_coords[ks] - j_frac_coords\r\n ks_to_j_frac_image = ks_to_j_frac - np.round(ks_to_j_frac)\r\n ks_coords = np.dot(j_frac_coords + ks_to_j_frac_image, structure.lattice.matrix)\r\n\r\n elif neighbor_scheme == \"pmg_cutoff\":\r\n j_nns_image = structure.get_neighbors(j_site, r=neighbor_cutoff)\r\n ks_coords = np.array([nn[0].coords for nn in j_nns_image])\r\n\r\n else:\r\n raise ValueError(\"No support for neighbor_scheme = {}\".format(neighbor_scheme))\r\n else:\r\n ks_coords = nn_infos[j][\"nn_coords_image\"]\r\n\r\n for k, k_coords in zip(ks, ks_coords):\r\n if (k != i) and (k != j):\r\n # Angle: ij vs jk\r\n vector1 = i_coords - j_coords\r\n vector2 = j_coords - k_coords\r\n angle_kj_ji = DataContainer._calculate_neighbor_angles(vector1, vector2)\r\n angle_kj_list.append(angle_kj_ji)\r\n\r\n dist_kj_expand_to_angle_meta_list.append(\"{}_{}\".format(k, j))\r\n dist_kj_ji_expand_to_angle_meta_list.append(\"{}_{}\".format(j, i))\r\n angle_kj_reduce_to_dist_list.append(dist_id)\r\n\r\n # m: neighbors of i\r\n ms = nn_infos[i][\"nn_ids\"]\r\n m_coords = nn_infos[i][\"nn_coords_image\"]\r\n for m, m_coords in zip(ms, m_coords):\r\n if (m != j):\r\n # Angle: im vs ji\r\n vector1 = i_coords - j_coords\r\n vector2 = i_coords - m_coords\r\n angle_im_ji = DataContainer._calculate_neighbor_angles(vector1, vector2)\r\n angle_im_list.append(angle_im_ji)\r\n\r\n dist_im_expand_to_angle_meta_list.append(\"{}_{}\".format(m, i))\r\n dist_im_ji_expand_to_angle_meta_list.append(\"{}_{}\".format(j, i))\r\n\r\n angle_im_reduce_to_dist_list.append(dist_id)\r\n dist_id += 1\r\n\r\n dist_kj_expand_to_angle_list = list(map(lambda x: map_ji_to_dist_id[x], dist_kj_expand_to_angle_meta_list))\r\n dist_im_expand_to_angle_list = list(map(lambda x: map_ji_to_dist_id[x], dist_im_expand_to_angle_meta_list))\r\n dist_kj_ji_expand_to_angle_list = list(map(lambda x: map_ji_to_dist_id[x], dist_kj_ji_expand_to_angle_meta_list))\r\n dist_im_ji_expand_to_angle_list = list(map(lambda x: map_ji_to_dist_id[x], dist_im_ji_expand_to_angle_meta_list))\r\n\r\n # now deal with targets\r\n targets = targets_list[structure_id]\r\n \r\n if target_type == \"structure\":\r\n # if structure-level targets, no need to deal with targets\r\n reduce_to_target_indices = [0] * len(structure)\r\n \r\n elif target_type == \"atom\":\r\n if isinstance(targets, dict):\r\n reduce_to_target_indices = list(targets.keys())\r\n targets = list(targets.values())\r\n elif isinstance(targets, (list, np.ndarray, tuple, pd.Series)):\r\n reduce_to_target_indices = np.arange(len(structure))\r\n else:\r\n raise ValueError(\"Targets_list of atom-level dataset: \"\r\n \"Only supports list of lists or list of dicts\")\r\n\r\n elif target_type == 
\"bond\":\r\n if isinstance(targets, dict):\r\n bond_indices = [\"{}_{}\".format(x, y) for x, y in targets.keys()]\r\n reduce_to_target_indices = list(map(lambda x: map_ji_to_dist_id[x], bond_indices))\r\n targets = list(targets.values())\r\n else:\r\n raise ValueError(\"Targets_list of bond-level dataset: \"\r\n \"Only supports list of dicts, with the key as tuple of bond indices\")\r\n\r\n else:\r\n raise ValueError(\"Only supports structure, atom or bond-level targets\")\r\n\r\n graph_data_dict = {\r\n \"atom_features_list\": np.array(atom_features_list).astype(np.float32 if atom_feature_scheme==\"external\" else np.int32),\r\n \"dist_list\": np.array(dist_list).astype(np.float32),\r\n \"angle_kj_list\": np.array(angle_kj_list).astype(np.float32),\r\n \"angle_im_list\": np.array(angle_im_list).astype(np.float32),\r\n \"id_i_list\": np.array(id_i_list).astype(np.int32),\r\n \"id_j_list\": np.array(id_j_list).astype(np.int32),\r\n \"dist_kj_expand_to_angle_list\": np.array(dist_kj_expand_to_angle_list).astype(np.int32),\r\n \"dist_im_expand_to_angle_list\": np.array(dist_im_expand_to_angle_list).astype(np.int32),\r\n \"dist_kj_ji_expand_to_angle_list\": np.array(dist_kj_ji_expand_to_angle_list).astype(np.int32),\r\n \"dist_im_ji_expand_to_angle_list\": np.array(dist_im_ji_expand_to_angle_list).astype(np.int32),\r\n \"angle_kj_reduce_to_dist_list\": np.array(angle_kj_reduce_to_dist_list).astype(np.int32),\r\n \"angle_im_reduce_to_dist_list\": np.array(angle_im_reduce_to_dist_list).astype(np.int32),\r\n \"n_structures\": 1,\r\n }\r\n target_data_dict = {\r\n \"reduce_to_target_indices\": np.array(reduce_to_target_indices).astype(np.int32),\r\n \"targets\": np.array(targets).astype(np.float32 if task_type == \"regression\" else np.int32)}\r\n\r\n total_graph_data_list.append(graph_data_dict)\r\n total_target_data_list.append(target_data_dict)\r\n\r\n if output_graph_data and save_graph_data_batch_size is not None:\r\n os.makedirs(output_path, exist_ok=True)\r\n\r\n # save the pkl file of each graph sample to file. 
No need to recalculate anymore.\r\n if save_graph_data_batch_size == 1:\r\n graph_data_id = source_ids[structure_id] if source_ids is not None else structure_id\r\n with open(os.path.join(output_path, \"graph_data_{}.pkl\".format(graph_data_id)), \"wb\") as f:\r\n pickle.dump(graph_data_dict, f, protocol=4)\r\n\r\n else:\r\n batch_graph_data_list.append(graph_data_dict)\r\n # Default: drop last\r\n if (structure_id + 1) % save_graph_data_batch_size == 0:\r\n with open(os.path.join(output_path, \"graph_data_batchsize{}_{}.pkl\".format(\r\n save_graph_data_batch_size, batch_id)), \"wb\") as f:\r\n pickle.dump(batch_graph_data_list, f, protocol=4)\r\n batch_id += 1\r\n batch_graph_data_list = list()\r\n\r\n if output_graph_data and save_graph_data_batch_size == len(structure_list):\r\n os.makedirs(output_path, exist_ok=True)\r\n with open(os.path.join(output_path, \"graph_data.pkl\"), \"wb\") as f:\r\n pickle.dump(total_graph_data_list, f, protocol=4)\r\n\r\n if output_targets:\r\n os.makedirs(output_path, exist_ok=True)\r\n with open(os.path.join(output_path, \"target_data.pkl\"), \"wb\") as f:\r\n pickle.dump(total_target_data_list, f, protocol=4)\r\n\r\n return cls(graph_data_list=total_graph_data_list,\r\n target_data_list=total_target_data_list,\r\n target_type=target_type,\r\n task_type=task_type)\r\n\r\n @staticmethod\r\n def collate_pool(graph_data_slice, target_data_slice,\r\n target_type=\"atom\", task_type=\"regression\",\r\n target_normalizer=None, target_col=\"targets\"):\r\n\r\n n_structures = len(graph_data_slice)\r\n if n_structures == 1:\r\n input_target_data_dict_for_slice = graph_data_slice[0]\r\n\r\n input_target_data_dict_for_slice[\"reduce_to_target_indices\"] = target_data_slice[0][\"reduce_to_target_indices\"]\r\n\r\n targets = target_data_slice[0][target_col]\r\n\r\n input_target_data_dict_for_slice[\"n_structures\"] = 1\r\n else:\r\n atom_features_list = list()\r\n dist_list = list()\r\n angle_kj_list = list()\r\n angle_im_list = list()\r\n id_i_list = list()\r\n id_j_list = list()\r\n dist_kj_expand_to_angle_list = list()\r\n dist_im_expand_to_angle_list = list()\r\n dist_kj_ji_expand_to_angle_list = list()\r\n dist_im_ji_expand_to_angle_list = list()\r\n angle_kj_reduce_to_dist_list = list()\r\n angle_im_reduce_to_dist_list = list()\r\n reduce_to_target_indices = list()\r\n targets = list()\r\n\r\n atom_num = 0\r\n dist_num = 0\r\n\r\n for idx, (graph_data_dict, target_data_dict) in enumerate(zip(graph_data_slice, target_data_slice)):\r\n atom_features_list.append(graph_data_dict[\"atom_features_list\"])\r\n dist_list.append(graph_data_dict[\"dist_list\"])\r\n angle_kj_list.append(graph_data_dict[\"angle_kj_list\"])\r\n angle_im_list.append(graph_data_dict[\"angle_im_list\"])\r\n id_i_list.append(graph_data_dict[\"id_i_list\"] + atom_num)\r\n id_j_list.append(graph_data_dict[\"id_j_list\"] + atom_num)\r\n dist_kj_expand_to_angle_list.append(graph_data_dict[\"dist_kj_expand_to_angle_list\"] + dist_num)\r\n dist_im_expand_to_angle_list.append(graph_data_dict[\"dist_im_expand_to_angle_list\"] + dist_num)\r\n dist_kj_ji_expand_to_angle_list.append(graph_data_dict[\"dist_kj_ji_expand_to_angle_list\"] + dist_num)\r\n dist_im_ji_expand_to_angle_list.append(graph_data_dict[\"dist_im_ji_expand_to_angle_list\"] + dist_num)\r\n angle_kj_reduce_to_dist_list.append(graph_data_dict[\"angle_kj_reduce_to_dist_list\"] + dist_num)\r\n angle_im_reduce_to_dist_list.append(graph_data_dict[\"angle_im_reduce_to_dist_list\"] + dist_num)\r\n\r\n if target_type == \"structure\":\r\n 
reduce_to_target_indices.append(target_data_dict[\"reduce_to_target_indices\"] + idx)\r\n elif target_type == \"atom\":\r\n reduce_to_target_indices.append(target_data_dict[\"reduce_to_target_indices\"] + atom_num)\r\n elif target_type == \"bond\":\r\n reduce_to_target_indices.append(target_data_dict[\"reduce_to_target_indices\"] + dist_num)\r\n\r\n targets.append(target_data_dict[target_col])\r\n\r\n atom_num += len(graph_data_dict[\"atom_features_list\"])\r\n dist_num += len(graph_data_dict[\"dist_list\"])\r\n\r\n # return a dict,containing input and target\r\n input_target_data_dict_for_slice = {\r\n \"atom_features_list\": np.concatenate(atom_features_list),\r\n \"dist_list\": np.concatenate(dist_list),\r\n \"angle_kj_list\": np.concatenate(angle_kj_list),\r\n \"angle_im_list\": np.concatenate(angle_im_list),\r\n \"id_i_list\": np.concatenate(id_i_list),\r\n \"id_j_list\": np.concatenate(id_j_list),\r\n \"dist_kj_expand_to_angle_list\": np.concatenate(dist_kj_expand_to_angle_list),\r\n \"dist_im_expand_to_angle_list\": np.concatenate(dist_im_expand_to_angle_list),\r\n \"dist_kj_ji_expand_to_angle_list\": np.concatenate(dist_kj_ji_expand_to_angle_list),\r\n \"dist_im_ji_expand_to_angle_list\": np.concatenate(dist_im_ji_expand_to_angle_list),\r\n \"angle_kj_reduce_to_dist_list\": np.concatenate(angle_kj_reduce_to_dist_list),\r\n \"angle_im_reduce_to_dist_list\": np.concatenate(angle_im_reduce_to_dist_list),\r\n \"n_structures\": n_structures,\r\n \"reduce_to_target_indices\": np.concatenate(reduce_to_target_indices),\r\n }\r\n\r\n if target_type != \"structure\":\r\n targets = np.concatenate(targets)\r\n else:\r\n targets = np.array(targets)\r\n\r\n # transform targets\r\n if task_type == \"regression\":\r\n if len(targets.shape) == 1:\r\n targets = targets[:, np.newaxis]\r\n\r\n if target_normalizer:\r\n targets = target_normalizer.transform(targets)\r\n\r\n else:\r\n if len(targets.shape) != 2 or targets.shape[1] < 2:\r\n raise ValueError(\"The targets for classification task_types should be 2-dimensional, \"\r\n \"and target value should be one-hot type with n_classes > 1.\")\r\n input_target_data_dict_for_slice[\"targets\"] = targets\r\n\r\n return input_target_data_dict_for_slice\r\n\r\n @staticmethod\r\n def _calculate_neighbor_angles(vector1, vector2):\r\n x = np.sum(vector1 * vector2, axis=-1)\r\n y = np.cross(vector1, vector2)\r\n y = np.linalg.norm(y, axis=-1)\r\n angle = np.arctan2(y, x)\r\n return angle\r\n\r\n def __len__(self):\r\n return len(self.graph_data_list)\r\n\r\n @staticmethod\r\n def int_keys():\r\n return ['id_i_list', 'id_j_list',\r\n 'dist_kj_expand_to_angle_list', 'dist_im_expand_to_angle_list',\r\n 'dist_kj_ji_expand_to_angle_list', 'dist_im_ji_expand_to_angle_list',\r\n 'angle_kj_reduce_to_dist_list', 'angle_im_reduce_to_dist_list',\r\n \"reduce_to_target_indices\"]\r\n\r\n @staticmethod\r\n def float_keys():\r\n return ['dist_list', 'angle_kj_list', 'angle_im_list']\r\n\r\n @staticmethod\r\n def int_number_keys():\r\n return [\"n_structures\"]\r\n\r\n def __getitem__(self, idx):\r\n return self.collate_pool(\r\n graph_data_slice=self.graph_data_list[idx],\r\n target_data_slice=self.target_data_list[idx],\r\n target_type=self.target_type,\r\n task_type=self.task_type,\r\n target_normalizer=self.target_normalizer,\r\n target_col=self.target_col\r\n 
)\r\n","repo_name":"Qi-max/DiPGNN","sub_path":"dipgnn/data/data_container.py","file_name":"data_container.py","file_ext":"py","file_size_in_byte":22887,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"8242991045","text":"from Crypto.Util.Padding import pad\nfrom Crypto.Cipher import AES\nimport os\n\nkey = os.urandom(2) + (\"A\" * 14).encode('utf-8')\n\n\ndef encrypt(key, msg):\n\tcrypto = AES.new(key, AES.MODE_CBC,key)\n\treturn crypto.encrypt(pad(msg, 16))\n\n\ndt = open('top_secret_Gergert.txt', 'rb').read()\nenc_dt = encrypt(key, dt)\n\nopen('top_secret_Gergert.evil_ransomware_29', 'wb').write(enc_dt)\nos.remove('top_secret_Gergert.txt')\n","repo_name":"whoamins/KCTF2022","sub_path":"Cryptography/CBC IV/CBCIV.py","file_name":"CBCIV.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42335650155","text":"class Solution:\n def isValid(self, s: str) -> bool:\n length=len(s)\n\n #No need of this after IndexError exception being handled\n if length == 1:\n return False\n\n keys={'(':')', '{': '}', '[':']'}\n i=0\n l=list(s)\n while i < length:\n print(i, l, length)\n e=l[i]\n try:\n if keys[e] == l[i+1]:\n l.pop(i+1),l.pop(i)\n if i >0: i-=1\n else:\n i+=1\n except (KeyError,IndexError):\n #return False if its a closing bracket\n #or the string ends with a opening bracket\n return False\n length=len(l)\n return not i\n\ns = \"(()){}{}[[[]]][\"\nprint(Solution().isValid(s))","repo_name":"hrutvik051/leetcode_solutions","sub_path":"20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7583693570","text":"import sys\nfrom collections import deque\n\ndef is_balanced(line):\n q = deque()\n\n for ch in line:\n if ch == '(':\n q.append(ch)\n elif ch == '[':\n q.append(ch)\n elif ch == ')':\n if len(q) == 0:\n return False\n if q.pop() != '(':\n return False\n elif ch == ']':\n if len(q) == 0:\n return False\n if q.pop() != '[':\n return False\n if len(q) > 0:\n return False\n\n return True\n\n\nwhile True:\n line = sys.stdin.readline().rstrip()\n if line == '.':\n break\n\n if is_balanced(line):\n print('yes')\n else:\n print('no')","repo_name":"wafflejuice/problem-solving-practice","sub_path":"BOJ/[4949]균형잡힌 세상.py","file_name":"[4949]균형잡힌 세상.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"7844787654","text":"import json\nimport csv\nimport argparse\nfrom pathlib import Path\nimport os\nimport math\nfrom tqdm import tqdm\nfrom collections import defaultdict\nimport numpy as np\nimport random\n\nimport datasets\nfrom datasets import load_dataset, Dataset, DatasetDict\nfrom torch.utils.data import DataLoader\nimport torch\nfrom transformers import (\n T5ForConditionalGeneration, set_seed,\n AutoConfig,\n BertTokenizer, BertModel, \n AutoTokenizer\n)\nimport transformers\nfrom accelerate import Accelerator\n\nclass ArgumentManager: \n @staticmethod\n def globalModelArguments(parser):\n group = parser.add_argument_group('Global model parameter that will suit on all model.')\n group.add_argument(\n \"--fp16\", action=\"store_true\",\n help=\"If you want to use fp16 or not.\",\n )\n\n @staticmethod\n def wwModelArguments(parser):\n group = parser.add_argument_group('Word embedding model')\n group.add_argument(\n 
\"--we_model_name_or_path\", type=str,\n help=\"Word embedding model name / path.\",\n )\n group.add_argument(\n \"--we_config_name\", type=str, default=None,\n help=\"Word embedding config name / path. If is None then set to model_name_or_path\",\n )\n group.add_argument(\n \"--we_tokenizer_name\", type=str, default=None,\n help=\"Word embedding tokenizer name / path. If is None then set to model_name_or_path\",\n )\n \n @staticmethod\n def ttModelArguments(parser):\n group = parser.add_argument_group('Two towel model')\n group.add_argument(\n \"--tt_dim_hidden\", type=int, default=500,\n help=\"Hidden dim inside two towel model's EncodingModel\",\n )\n group.add_argument(\n \"--tt_n_hidden\", type=int, default=2,\n help=\"Number of hidden layers inside EncodingModel\",\n )\n group.add_argument(\n \"--tt_p_dropout\", type=float, default=0.1,\n help=\"Dropout probability of EncodingModel\",\n )\n group.add_argument(\n \"--tt_dim_encoding\", type=int, default=500,\n help=\"Encoding dim of two towel model. Will be the final user/item embedding dimension.\",\n )\n\n @staticmethod\n def fileArguments(parser):\n group = parser.add_argument_group('Files')\n group.add_argument(\n \"--user_file\", type=str, default=None,\n help=\"User feature file. i.e. users.csv\",\n )\n group.add_argument(\n \"--item_file\", type=str, default=None,\n help=\"Item / course feature file. i.e. courses.csv\",\n )\n group.add_argument(\n \"--train_item_file\", type=str, default=None,\n help=\"Train item file. i.e. train.csv\",\n )\n group.add_argument(\n \"--chapter_file\", type=str, default=None,\n help=\"Chapter for course. i.e. course_chapter_items.csv\",\n )\n group.add_argument(\n \"--subgroup_file\", type=str, default=None,\n help=\"Mapping between subgroup and index. i.e. subgroups.csv\",\n )\n group.add_argument(\n \"--test_item_seen_file\", type=str, default=None,\n help=\"i.e. test_seen.csv\",\n )\n group.add_argument(\n \"--test_item_unseen_file\", type=str, default=None,\n help=\"i.e. test_unseen.csv\",\n )\n\n @staticmethod\n def saveLoadArguments(parser):\n group = parser.add_argument_group('Saving & Loading Files')\n group.add_argument(\n \"--load_feature_ds\", type=str, default=None,\n help=\"Feature dataset loading path\",\n )\n group.add_argument(\n \"--load_useritem_example_ds\", type=str, default=None,\n help=\"User item example dataset loading path\",\n )\n group.add_argument(\n \"--load_tt_model_dir\", type=str, default=None,\n help=\"Load directory for two-towel model. Used when predicting or continuing training\",\n )\n group.add_argument(\n \"--save_eds\", type=str, default=None,\n help=\"Save path for user-item example dataset\",\n )\n group.add_argument(\n \"--save_fdss\", type=str, default=None,\n help=\"Save path for user-item feature datasets\",\n )\n group.add_argument(\n \"--save_tt_model_dir\", type=str, default=None,\n help=\"Save directory for two-towel model, and its parameters\",\n )\n group.add_argument(\n \"--save_rec_path\", type=str, default=None,\n help=\"Save path for recommendation result. 
Should be a directory\",\n )\n\n @staticmethod\n def preprocessArguments(parser):\n group = parser.add_argument_group('Preprocessing')\n group.add_argument(\n \"--max_length\", type=int, default=256,\n help=\"Max length for tokenizer for input (text)\",\n )\n\n @staticmethod\n def trainingArguments(parser):\n group = parser.add_argument_group('Training')\n group.add_argument(\n \"--train_batch_size\", type=int, default=512,\n help=\"Training batch size\",\n )\n group.add_argument(\n \"--eval_batch_size\", type=int, default=8,\n help=\"Evaluation batch size\",\n )\n group.add_argument(\n \"--tt_ns_w\", type=float, default=0.1,\n help=\"Weight given to negative sample in weighted matrix factorization loss function\",\n )\n group.add_argument(\n \"--num_epochs\", type=int, default=10,\n help=\"Number of training epochs\",\n )\n group.add_argument(\n \"--learning_rate\", type=float, default=1e-3,\n help=\"Learning rate\",\n )\n\n @staticmethod\n def taskArguments(parser):\n group = parser.add_argument_group(\"Tasks specifier, since I didn't separate everything into its own script.\")\n group.add_argument(\n \"--two_towel_task\", type=str, default=\"train\",\n help=\"Task specifier, default to 'train'.\",\n choices=[\"train\", \"predict\"]\n )\n group.add_argument(\n \"--feature_extract_task\", type=str, default=\"eds\",\n help=\"Task specifier, default to 'eds'.\",\n choices=[\"eds\", \"fdss\"]\n )\n\n @staticmethod\n def getAllFnLs():\n return [\n ArgumentManager.globalModelArguments,\n ArgumentManager.wwModelArguments,\n ArgumentManager.ttModelArguments,\n ArgumentManager.fileArguments,\n ArgumentManager.saveLoadArguments,\n ArgumentManager.preprocessArguments,\n ArgumentManager.trainingArguments,\n ArgumentManager.taskArguments,\n ]\n\ndef parse_args(fn_ls=None):\n \"parse all arguments with all functions in fn_ls, which will all be called with args = [parser].\"\n parser = argparse.ArgumentParser(description=\"Turn user and item CSV into features\")\n if fn_ls is None:\n fn_ls = ArgumentManager.getAllFnLs()\n for fn in fn_ls:\n fn(parser)\n args = parser.parse_args()\n\n return args\n\ndef setAcceleratorLoggingVerbosity(accelerator):\n if accelerator.is_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\ndef loadModelStr(model_name_or_path, config_name):\n \"Like loadModelWE, but takes the model/config names directly instead of the args namespace\"\n # config\n if config_name:\n config = AutoConfig.from_pretrained(config_name)\n elif model_name_or_path:\n config = AutoConfig.from_pretrained(model_name_or_path)\n else:\n raise ValueError(\"Did not specify config\")\n\n if model_name_or_path:\n model = BertModel.from_pretrained(\n model_name_or_path,\n from_tf=bool(\".ckpt\" in model_name_or_path),\n config=config,\n )\n else:\n raise ValueError(\"Did not specify model\")\n \n return model\n\ndef loadModelWE(args):\n \"load word embedding model (usually bert)\"\n return loadModelStr(args.we_model_name_or_path, args.we_config_name)\n\ndef loadTokenizerWE(args):\n if args.we_tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(args.we_tokenizer_name, use_fast=True)\n elif args.we_model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(args.we_model_name_or_path, use_fast=True)\n else:\n raise ValueError(\"Did not specify tokenizer\")\n return tokenizer\n\ndef loadDataset(args):\n \"\"\"\n Load raw datasets, directly from csv files. 
IDs are strings\n For reference:\n >>> loadDataset(args)\n DatasetDict({\n user: Dataset({\n features: ['user_id', 'gender', 'occupation_titles', 'interests', 'recreation_names'],\n num_rows: 130566\n })\n item: Dataset({\n features: ['course_id', 'course_name', 'course_price', 'teacher_id', 'teacher_intro', 'groups', 'sub_groups', 'topics', 'course_published_at_local', 'description', 'will_learn', 'required_tools', 'recommended_background', 'target_group'],\n num_rows: 728\n })\n subgroup: Dataset({\n features: ['subgroup_id', 'subgroup_name'],\n num_rows: 91\n })\n chapter: Dataset({\n features: ['course_id', 'chapter_no', 'chapter_id', 'chapter_name', 'chapter_item_id', 'chapter_item_no', 'chapter_item_name', 'chapter_item_type', 'video_length_in_seconds'],\n num_rows: 21290\n })\n })\n \"\"\"\n data_files = {\n 'user': args.user_file,\n 'item': args.item_file,\n 'subgroup': args.subgroup_file,\n 'chapter': args.chapter_file\n }\n return datasets.DatasetDict(\n {\n name: load_dataset(\"csv\", data_files=fname)['train']\n for name, fname in data_files.items()\n }\n )\n \ndef substituteNone(s, *lss):\n # for ls in lss, substitute None with s\n return [[s if i is None else i for i in ls] for ls in lss] \n\ndef preprocessUserExamples(examples, tokenizer, max_length, sep=\"|\"):\n \"\"\"\n preprocess user examples to the format we want, used for ds['user'].map()\n just tokenize, don't go through embedding model and concat all tensors\n user_feature = onehot(gender) || embed(occupation_titles || interests || recreation_names)\n \"\"\"\n # constants\n gender_map = {\n None: 0,\n 'female': 1,\n 'male': 2,\n 'other': 3\n }\n\n # needed fields\n gender = examples['gender']\n occupation_titles = examples['occupation_titles']\n interests = examples['interests']\n recreation_names = examples['recreation_names']\n \n # preprocess\n occupation_titles, interests, recreation_names = substituteNone(\n \"\", \n occupation_titles, interests, recreation_names\n )\n\n # one-hot\n gender = torch.LongTensor([gender_map[g] for g in gender])\n gender_one_hot = torch.nn.functional.one_hot(gender, num_classes=len(gender_map))\n\n # tokenize\n inputs = [sep.join(tp) for tp in zip(occupation_titles, interests, recreation_names)]\n model_inputs = tokenizer(inputs, max_length=max_length, padding=\"max_length\", truncation=True)\n model_inputs['gender_one_hot'] = gender_one_hot\n \n return model_inputs\n\ndef preprocessItemExamples(examples, tokenizer, max_length, subgroup_map, chapter_map, sep=\"|\"):\n \"\"\"\n preprocess course, used for ds['item'].map()\n subgroup_map: string -> index, 0-indexing!!!!!\n chapter map: item_id -> all chapter name as a string\n course_feature: log(price + 1) || onehot(subgroup) || embed(\n course_name || sub_groups || topics || will_learn || required_tools || recommended_background \n || target_group || concat(chapter_item_name)\n )\n \"\"\"\n course_name = examples['course_name']\n sub_groups = examples['sub_groups']\n topics = examples['topics']\n will_learn = examples['will_learn']\n required_tools = examples['required_tools']\n recommended_background = examples['recommended_background']\n target_group = examples['target_group']\n\n # preprocess\n course_name, sub_groups, topics, will_learn, required_tools, recommended_background, target_group = substituteNone(\n \"\",\n course_name, sub_groups, topics, will_learn, required_tools, recommended_background, target_group\n )\n\n # one-hot for subgroup\n # if a course have a subgroup, then its position will be one\n # so for courses with 
multiple subgroup, multiple position will be one\n subgroup_one_hot = torch.stack([\n torch.sum(\n (torch.nn.functional.one_hot(\n torch.LongTensor([subgroup_map[sg] for sg in subgroup.split(',')]),\n num_classes=len(subgroup_map)\n ) if subgroup != '' else torch.zeros((1, len(subgroup_map))).type(torch.LongTensor))\n , dim=0\n )\n for subgroup in sub_groups\n ])\n\n chapter_names = [chapter_map[course_id] for course_id in examples['course_id']]\n\n # tokenize\n inputs = [\n sep.join(tp)\n for tp in zip(\n course_name, sub_groups, topics, will_learn, required_tools, recommended_background, \n target_group, chapter_names\n )\n ]\n model_inputs = tokenizer(inputs, max_length=max_length, padding=\"max_length\", truncation=True)\n\n # other\n model_inputs['course_price'] = torch.log(torch.Tensor(examples['course_price']) + 1)\n\n model_inputs['subgroup_one_hot'] = subgroup_one_hot\n\n return model_inputs\n\ndef preprocessDataset(args, dss, tokenizer=None):\n \"\"\"\n preprocess datasets\n \n dss: contain at least [user, item, subgroup, chapter]. IDs are strings\n For reference:\n >>> pdss\n DatasetDict({\n user: Dataset({\n features: ['user_id', 'gender', 'occupation_titles', 'interests', 'recreation_names', 'input_ids', 'token_type_ids', 'attention_mask', 'gender_one_hot'],\n num_rows: 130566\n })\n item: Dataset({\n features: ['course_id', 'course_name', 'course_price', 'teacher_id', 'teacher_intro', 'groups', 'sub_groups', 'topics', 'course_published_at_local', 'description', 'will_learn', 'required_tools', 'recommended_background', 'target_group', 'input_ids', 'token_type_ids', 'attention_mask'],\n num_rows: 728\n })\n subgroup: Dataset({\n features: ['subgroup_id', 'subgroup_name'],\n num_rows: 91\n })\n chapter: Dataset({\n features: ['course_id', 'chapter_no', 'chapter_id', 'chapter_name', 'chapter_item_id', 'chapter_item_no', 'chapter_item_name', 'chapter_item_type', 'video_length_in_seconds'],\n num_rows: 21290\n })\n })\n \"\"\"\n\n if tokenizer is None:\n tokenizer = loadTokenizerWE(args)\n\n # make subgroup map\n subgroup_map = dict()\n for subgroup in dss['subgroup']:\n # subtract 1 to start from 0\n subgroup_map[subgroup['subgroup_name']] = subgroup['subgroup_id'] - 1\n\n # make chapter map\n chapter_map = defaultdict(list)\n for chapter in dss['chapter']:\n chapter_map[chapter['course_id']].append(\n chapter['chapter_item_name'] if chapter['chapter_item_name'] is not None else \"\"\n )\n chapter_map = defaultdict(str, {course_id: ','.join(ls) for course_id, ls in chapter_map.items()})\n \n pdss = DatasetDict()\n pdss['user'] = dss['user'].map(\n lambda x: preprocessUserExamples(x, tokenizer, args.max_length),\n batched=True,\n desc=\"preprocessDataset: User\"\n )\n pdss['item'] = dss['item'].map(\n lambda x: preprocessItemExamples(x, tokenizer, args.max_length, subgroup_map, chapter_map),\n batched=True,\n desc=\"preprocessDataset: Item\"\n )\n\n pdss['user'].set_format(\n type=\"torch\", \n columns=[\"input_ids\", \"token_type_ids\", \"attention_mask\", \"gender_one_hot\"],\n output_all_columns=True\n )\n pdss['item'].set_format(\n type=\"torch\", \n columns=[\"input_ids\", \"token_type_ids\", \"attention_mask\", \"subgroup_one_hot\", \"course_price\"],\n output_all_columns=True\n )\n\n return pdss\n\ndef toBertInput(inputs, to_device=None):\n \"\"\"\n model(**inputs) won't work if input contains other fields,\n model(**toBertInput(inputs)) solves this problem\n \"\"\"\n if to_device is None:\n return {\n name: inputs[name]\n for name in ['input_ids', 'token_type_ids', 
'attention_mask']\n }\n else:\n return {\n name: inputs[name].to(to_device)\n for name in ['input_ids', 'token_type_ids', 'attention_mask']\n }\n\ndef makeUserItemExamplesDataset(dss, df, neg_portion=0.5):\n \"\"\"\n make some positive and negative examples for user-item pairs\n\n dss: dataset (containing at least user & item)\n df: dataframe for positive `user_id -> list of item_id`, e.g. train.csv\n neg_portion: how much portion does negative samples have, in [0, 1)\n e.g. if n_pos = 100 and neg_portion = 0.25, then we will make 33 negative examples (33/(100+33) = 0.25)\n this can be set to 0 to avoid making any negative examples\n\n return a dataset containing [user_id, item_id, label (0 or 1)], not sorted in any way\n IDs are strings\n \"\"\"\n user_ids = dss['user']['user_id']\n item_ids = dss['item']['course_id']\n\n pos_set = set()\n \n # get all positive samples\n for i, row in df.iterrows():\n user_id = row['user_id']\n for item_id in row['course_id'].split(' '):\n pos_set.add((user_id, item_id))\n \n n_pos = len(pos_set)\n n_neg = int(n_pos * neg_portion / (1 - neg_portion))\n \n # sample negatives\n neg_set = set()\n while len(neg_set) < n_neg:\n user_id = random.choice(user_ids)\n item_id = random.choice(item_ids)\n p = (user_id, item_id)\n if p in pos_set or p in neg_set:\n continue\n neg_set.add(p)\n \n # make dataset\n ds_ls = [{'user_id': uid, 'item_id': iid, 'label': 1} for uid, iid in pos_set]\n ds_ls.extend([{'user_id': uid, 'item_id': iid, 'label': 0} for uid, iid in neg_set])\n \n return Dataset.from_list(ds_ls)\n\ndef loadUserItemExamplesDataset(args):\n return datasets.load_from_disk(args.load_useritem_example_ds)\n\ndef makeUserItemFeatureDataset(args, accelerator, pdss):\n \"\"\"\n make user and item feature vectors, i.e. actually go through stuff because I\n don't want to train bert for now. 
Will add an `embed` column, which is the feature vector\n \n IDs are still strings\n\n pdss: from preprocessDataset()\n\n return DatasetDict({\n user: Dataset({\n features: ['user_id', 'embed'],\n num_rows: 130566\n })\n item: Dataset({\n features: ['item_id', 'embed'],\n num_rows: 728\n })\n })\n\n where user embedding dim is 772, item is 860.\n \"\"\"\n def userMap(examples, model, accelerator):\n \"user_feature = onehot(gender) || embed(occupation_titles || interests || recreation_names)\"\n outputs = model(**toBertInput(examples, to_device=accelerator.device))\n last_hidden_states = outputs.last_hidden_state # (N, token_cnt, 768)\n embed = torch.concat((examples['gender_one_hot'].to(accelerator.device), last_hidden_states[:, 0, :]), dim=-1)\n # print(embed.shape) # should be (N, 768+4)\n\n return {'user_id': examples['user_id'], 'embed': embed}\n\n\n def itemMap(examples, model, accelerator):\n \"\"\"\n course_feature = log(price + 1) || onehot(subgroup) || embed(\n course_name || sub_groups || topics || will_learn || required_tools || recommended_background \n || target_group || concat(chapter_item_name)\n course_name, sub_groups, topics, will_learn, required_tools, recommended_background, target_group, concat(chapter_item_name)\n )\n \"\"\"\n outputs = model(**toBertInput(examples, to_device=accelerator.device))\n last_hidden_states = outputs.last_hidden_state # (1, token_cnt, 768)\n embed = torch.concat(\n (\n examples['course_price'].unsqueeze(1).to(accelerator.device), \n examples['subgroup_one_hot'].to(accelerator.device), \n last_hidden_states[:, 0, :]\n ), dim=-1\n )\n\n return {'item_id': examples['course_id'], 'embed': embed}\n\n model = loadModelWE(args)\n model = accelerator.prepare(model)\n model.eval()\n ds_dict = DatasetDict()\n\n # 3060 6G, 20min\n ds_dict['user'] = pdss['user'].map(\n lambda x: userMap(x, model, accelerator),\n batched=True,\n batch_size=24,\n remove_columns=pdss['user'].column_names,\n desc=\"makeUserItemFeatureDataset: User\"\n )\n ds_dict['item'] = pdss['item'].map(\n lambda x: itemMap(x, model, accelerator),\n batched=True,\n batch_size=8,\n remove_columns=pdss['item'].column_names,\n desc=\"makeUserItemFeatureDataset: Item\"\n )\n return ds_dict\n\ndef loadUserItemFeatureDataset(args):\n return datasets.load_from_disk(args.load_feature_ds)\n \ndef makeUserItemEmbedding(fdss):\n \"\"\"\n make embedding from fdss['user'], fdss['item']\n fdss is from makeFeatureDataset(), or load yourself one\n \n user_map, item_map: id(str) -> index(int), 0-indexing\n\n so for a user_id (str), its embedding is user_embed(item_map[user_id])\n \"\"\"\n fdss['user'].set_format(\n type=\"torch\", columns=[\"embed\"], output_all_columns=True\n )\n fdss['item'].set_format(\n type=\"torch\", columns=[\"embed\"], output_all_columns=True\n )\n\n user_embed = torch.nn.Embedding.from_pretrained(fdss['user']['embed'], freeze=True)\n uids = fdss['user']['user_id']\n user_map = {uid: i for i, uid in enumerate(uids)}\n\n item_embed = torch.nn.Embedding.from_pretrained(fdss['item']['embed'], freeze=True)\n iids = fdss['item']['item_id']\n item_map = {iid: i for i, iid in enumerate(iids)}\n\n return user_embed, user_map, item_embed, item_map\n\ndef makeUserItemDataloader(args, eds, user_embed, user_map, item_embed, item_map, is_train=True):\n \"\"\"\n make dataloader from eds,\n the resulting dataset inside will have ['user_embed', 'item_embed', 'label'] inside\n\n >>> a = iter(dataloader) # batch size 8\n >>> b = next(a)\n >>> b['item_embed'].shape\n torch.Size([8, 860])\n >>> 
b['user_embed'].shape\n torch.Size([8, 772])\n >>> b['label'].shape\n torch.Size([8])\n \"\"\"\n def embedMap(examples):\n # turn string id into number\n user_ids = [user_map[uid] for uid in examples['user_id']]\n item_ids = [item_map[iid] for iid in examples['item_id']]\n\n # turn id into embedding\n ret = dict()\n ret['user_embed'] = user_embed(torch.LongTensor(user_ids))\n ret['item_embed'] = item_embed(torch.LongTensor(item_ids))\n ret['label'] = examples['label']\n return ret\n\n ds = eds.map(\n embedMap, batched=True, remove_columns=eds.column_names,\n desc=\"makeUserItemDataloader\"\n )\n\n ds.set_format(type=\"torch\", columns=ds.column_names, output_all_columns=True)\n\n # make that into dataloader\n dataloader = DataLoader(\n ds,\n shuffle=True if is_train else False,\n batch_size=args.train_batch_size if is_train else args.eval_batch_size,\n )\n\n return dataloader","repo_name":"Kaiserouo/ADL22-Final","sub_path":"two_tower/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":23802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"27245769887","text":"import random\nfrom turtle import Turtle, colormode\n\nCOLOR = [\"red\", \"green\", \"blue\", \"white\", \"cyan\", \"magenta\"]\ncolormode(255)\n\n\nclass Food(Turtle):\n def __init__(self):\n super().__init__()\n self.shape(\"circle\")\n self.penup()\n self.shapesize(stretch_len=0.5, stretch_wid=0.5)\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(COLOR))\n random_x = random.randint(-280, 280)\n random_y = random.randint(-280, 280)\n self.goto(random_x, random_y)\n","repo_name":"vipcodes/Snake_Game","sub_path":"food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"36458454955","text":"class Solution(object):\n def wordPattern(self, pattern, str):\n \"\"\"\n :type pattern: str\n :type str: str\n :rtype: bool\n \"\"\"\n s = pattern\n t = str.split(' ')\n if len(s) != len(t):\n return False\n s_t_dict, t_s_dict = {}, {}\n for i in xrange(0, len(s)):\n if s[i] not in s_t_dict:\n if t[i] in t_s_dict:\n return False\n s_t_dict[s[i]] = t[i]\n t_s_dict[t[i]] = s[i]\n else:\n if s_t_dict[s[i]] != t[i]:\n return False\n return True ","repo_name":"StandAloneComplex666/leetcode_python","sub_path":"# 290. Word Pattern.py","file_name":"# 290. 
Word Pattern.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"97"} +{"seq_id":"26855593426","text":"# only predict the next q\r\nimport os\r\nimport argparse\r\nimport tqdm\r\nimport numpy as np\r\nimport pickle\r\nfrom robot.controller.rollout_controller import RolloutCEM\r\n\r\nimport torch\r\nfrom robot.model.arm.dataset import Dataset\r\nfrom robot.model.arm.envs import make\r\nfrom robot import U\r\nfrom .agents import RolloutAgent\r\n\r\n\r\nclass DatasetWrapper:\r\n def __init__(self, dataset, batch_size, timestep, cls):\r\n self.dataset = dataset\r\n self.batch_size = batch_size\r\n self.timestep = timestep\r\n self.cls = cls\r\n\r\n def sample(self, mode):\r\n obs, action = self.dataset.sample(mode=mode, batch_size=self.batch_size, timestep=self.timestep)\r\n frames = self.cls.from_observation(obs)\r\n return frames[..., 0, :], action, frames[..., 1:, :]\r\n\r\n\r\nclass RolloutWrapper:\r\n def __init__(self, model, frame_type):\r\n self.model = model\r\n self.cls = frame_type\r\n\r\n def rollout(self, obs, a, goal):\r\n obs = U.togpu(obs)\r\n a = U.togpu(a)\r\n if goal is not None:\r\n goal = U.togpu(goal)\r\n\r\n s = self.cls.from_observation(obs)\r\n predict, reward = self.model.rollout(s, a, goal)\r\n return predict, -reward\r\n\r\n\r\nclass Renderer:\r\n # renderer is not the state\r\n def __init__(self, env):\r\n self.env = env\r\n\r\n def render_frame(self, x):\r\n obs = x.as_observation()\r\n if self.goal is not None:\r\n obs['desired_goal'] = self.goal\r\n return self.env.unwrapped.render_obs(obs, reset=False)\r\n\r\n def render(self, data, info, num=2, return_image=True, goal=None):\r\n self.goal = goal\r\n start, actions, future = [i[:num] for i in data]\r\n\r\n start = start.cpu()\r\n future = future.cpu()\r\n\r\n predict = info['predict']\r\n\r\n images = []\r\n for i in range(num):\r\n start_img = self.render_frame(start[i])\r\n ground_truth = [start_img] + [self.render_frame(s) for s in future[i]]\r\n predicted = [start_img]+ [self.render_frame(s) for s in predict[i]]\r\n if return_image:\r\n images.append(np.concatenate((\r\n np.concatenate(ground_truth, axis=1),\r\n np.concatenate(predicted, axis=1),\r\n ), axis=0))\r\n else:\r\n def make():\r\n for a, b in zip(ground_truth, predicted):\r\n yield np.concatenate((a, b), axis=1)\r\n return make()\r\n return np.stack(images)\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--env_name\", type=str, default=None, choices=['arm', 'acrobat2', 'plane', 'cheetah'])\r\n parser.add_argument(\"--batchnorm\", type=int, default=0)\r\n parser.add_argument(\"--model\", type=str, default='mlp')\r\n parser.add_argument(\"--batch_size\", type=int, default=256)\r\n\r\n parser.add_argument(\"--num_epoch\", type=int, default=1000)\r\n parser.add_argument(\"--num_train_iter\", type=int, default=10000)\r\n parser.add_argument(\"--num_valid_iter\", type=int, default=200)\r\n parser.add_argument(\"--lr\", type=float, default=0.001)\r\n parser.add_argument(\"--path\", type=str, default='rollout')\r\n parser.add_argument(\"--timestep\", type=int, default=10)\r\n parser.add_argument(\"--device\", type=str, default='cuda:0')\r\n\r\n parser.add_argument(\"--weight_q\", type=float, default=1.)\r\n parser.add_argument(\"--weight_dq\", type=float, default=1)\r\n parser.add_argument(\"--weight_ee\", type=float, default=1.)\r\n parser.add_argument(\"--resume\", type=int, default=0)\r\n args = parser.parse_args()\r\n 
return args\r\n\r\n\r\nclass trainer:\r\n def __init__(self, args):\r\n self.args = args\r\n self.path = self.args.path\r\n self.vis = U.Visualizer(args.path)\r\n\r\n with open(os.path.join(args.path, 'args'), 'wb') as f:\r\n pickle.dump(args, f)\r\n\r\n self.set_env()\r\n self.set_model()\r\n self.set_agent()\r\n\r\n self.set_policy()\r\n\r\n self.set_renderer()\r\n\r\n self.epoch_num = 0\r\n for self.epoch_num in range(args.num_epoch):\r\n self.epoch(args.num_train_iter, args.num_valid_iter, num_eval=5, use_tqdm=True)\r\n # we save the agent by default\r\n self.save()\r\n\r\n def make_frame_cls(self, env_name, env):\r\n from .frame import ArmBase, Plane\r\n if env_name == 'arm':\r\n return ArmBase\r\n elif env_name == 'plane':\r\n return Plane\r\n else:\r\n raise NotImplementedError\r\n\r\n def set_env(self):\r\n args = self.args\r\n self.env = make(args.env_name)\r\n dataset_path = os.path.join('/dataset/', args.env_name)\r\n self.frame_type = self.make_frame_cls(args.env_name, self.env)\r\n self.dataset = DatasetWrapper(Dataset(dataset_path, device=args.device),\r\n batch_size=args.batch_size,\r\n timestep=args.timestep, cls = self.frame_type)\r\n\r\n def set_model(self):\r\n from .models import MLP_ARM\r\n cls = self.frame_type\r\n self.model = MLP_ARM(cls.input_dims, cls.output_dims, 4, 256, batchnorm=self.args.batchnorm)\r\n\r\n def set_agent(self):\r\n # max_a is very important to make it work...\r\n args = self.args\r\n if not args.resume:\r\n agent = RolloutAgent(self.model, lr=args.lr, loss_weights={\r\n 'q_loss': args.weight_q,\r\n 'dq_loss': args.weight_dq,\r\n 'ee_loss': args.weight_ee,\r\n })\r\n else:\r\n import torch, os\r\n agent = torch.load(os.path.join(args.path, 'agent'))\r\n\r\n self.agent = agent.cuda()\r\n\r\n def set_renderer(self):\r\n self.renderer = Renderer(self.env)\r\n\r\n def set_rollout_model(self):\r\n self.rollout_predictor = RolloutWrapper(self.agent, self.frame_type)\r\n\r\n def set_policy(self):\r\n self.set_rollout_model()\r\n args = self.args\r\n self.controller = RolloutCEM(self.rollout_predictor, self.env.action_space, iter_num=10,\r\n horizon=args.timestep-1, num_mutation=500, num_elite=50, device=args.device)\r\n\r\n def epoch(self, num_train, num_valid, num_eval=5, use_tqdm=False):\r\n print(\"TRAIN EPOCH\", self.epoch_num)\r\n\r\n def evaluate(to_vis, num_eval):\r\n if num_eval == 0:\r\n return\r\n\r\n policy = U.eval_policy(self.controller, self.env, eval_episodes=num_eval, save_video=0.,\r\n video_path=os.path.join(self.path, \"video{}.avi\"), return_trajectories=True)\r\n to_vis['reward_eval'], trajectories = policy\r\n\r\n obs, actions = trajectories[0]\r\n goal = obs[0]['desired_goal']\r\n obs = np.array([i['observation'] for i in obs]) # note that we always use goal conditioned environment\r\n actions = np.array(actions)\r\n\r\n as_frame = self.frame_type.from_observation(obs)\r\n predict = self.rollout_predictor.rollout(obs[None, 0], actions[None, :], None)[0].cpu()\r\n\r\n data = [as_frame[None, 0], actions, as_frame[None, 1:]]\r\n to_vis['rollout'] = self.renderer.render(data, {'predict': predict}, num=1, return_image=False, goal=goal)\r\n return to_vis\r\n\r\n\r\n ran = tqdm.trange if use_tqdm else range\r\n # train\r\n train_output = []\r\n for idx in ran(num_train):\r\n data = self.dataset.sample('train')\r\n info = self.agent.update(*data)\r\n train_output.append(info)\r\n if idx % 200 == 0:\r\n out = U.merge_training_output(train_output)\r\n if idx == 0:\r\n out['image'] = self.renderer.render(data, info)\r\n self.vis(out)\r\n 
train_output = []\r\n\r\n # evaluate\r\n self.agent.eval()\r\n assert not self.agent.training\r\n valid_output = []\r\n to_vis = {}\r\n\r\n for i in ran(num_valid):\r\n data = self.dataset.sample('valid')\r\n info = self.agent.update(*data)\r\n valid_output.append(info)\r\n if i == num_valid - 1:\r\n to_vis = U.merge_training_output(valid_output)\r\n to_vis['valid_image'] = self.renderer.render(data, info)\r\n\r\n to_vis = {'valid_'+i:v for i, v in to_vis.items()}\r\n\r\n evaluate(to_vis, num_eval=num_eval)\r\n self.vis(to_vis, self.vis.tb.step - 1)\r\n self.agent.train()\r\n\r\n def save(self):\r\n torch.save(self.agent, os.path.join(self.path, 'agent'))\r\n","repo_name":"hzaskywalker/torch_robotics","sub_path":"robot/model/arm/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":8588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"97"} +{"seq_id":"28902013646","text":"import tkinter as tk\nfrom tkinter import ttk\nimport plotly.graph_objects as go\nfrom PIL import ImageTk, Image\nimport numpy as np\nimport cv2\nimport io\n\nclass engineSensorsWindow(ttk.Frame):\n def __init__(self,parent):\n super().__init__(parent)\n self.grid()\n self.controller = None\n self.__setEngineTemp(1,0,1,1)\n self.__setCoolantLevel(1,1,1,1)\n self.__setFuelLevel(3,1,1,1)\n self.__setSpeedometer(3, 0, 1, 1)\n self.__addFrameSwitchButton(4)\n self.__addCameraButton(4)\n self.__setOutdoorTemperature(1,2,1,1)\n self.__setIndoorTemperature(3,2,1,1)\n self.__setFuelPercentageValue(2,2,1,1)\n for row in range(5):\n self.grid_rowconfigure(row, weight=1)\n for col in range(2):\n self.grid_columnconfigure(col, weight=1)\n global emptyFuel\n emptyFuel = ImageTk.PhotoImage(Image.open(\"fluid0T.png\"))\n global speedfig\n \n\n def set_controller(self,controller):\n self.controller = controller\n\n def __setFuelPercentageValue(self,row,col,rowspan,colspan):\n global fuelText\n fuelText = tk.StringVar()\n fuelText.set(\"Fuel: - %\")\n fuelPercentageLabel = tk.Label(self,textvariable=fuelText,bg=\"#1E2130\",fg='white',font=(\"Open sans\", 15))\n fuelPercentageLabel.grid(row=row,column=col,rowspan=rowspan)\n\n def __setOutdoorTemperature(self,row,col,rowspan,colspan):\n global outdoortemp\n outdoortemp = tk.StringVar()\n outdoortemp.set(\"Outdoor Temp: \" + \" -C\\N{DEGREE SIGN}\")\n outdoortempLabel = tk.Label(self,textvariable=outdoortemp,bg=\"#1E2130\",fg='white',font=(\"Open sans\", 15))\n outdoortempLabel.grid(row=row,column=col,rowspan=rowspan)\n\n def __setIndoorTemperature(self,row,col,rowspan,colspan):\n self.__addText(\"Indoor Temp: \" + \"- C\\N{DEGREE SIGN}\", row, col, rowspan)\n\n def __addText(self,text,row,col,span):\n label = tk.Label(self,text=text,bg=\"#1E2130\",fg='white',font=(\"Open sans\", 15))\n label.grid(row=row,column=col,rowspan=span)\n\n def __addFrameSwitchButton(self,row):\n switchButton = tk.Button(self,text=\"Sensor screen\",fg='white', bg=\"#1E2130\", font=(\"Open sans\", 15),\n command=self.onSwitchButtonClick)\n switchButton.grid(row=row,column=0)\n\n def __addCameraButton(self,row):\n reversingButton = tk.Button(self,text=\"Reversing Camera\",fg='white', bg=\"#1E2130\", font=(\"Open sans\", 15),\n command=self.onCameraButtonClick)\n reversingButton.grid(row=row,column=1)\n\n def onSwitchButtonClick(self):\n self.controller.show_frame(\"sensorWindow\")\n\n def onCameraButtonClick(self):\n self.controller.show_frame(\"reversingCamera\")\n\n def __setFuelLevel(self,row,col,rowspan,colspan):\n global fuelImg\n fuelImg = 
ImageTk.PhotoImage(Image.open(\"fluid50DT.png\"))\n global fuelCanvas\n fuelCanvas = tk.Canvas(self, bg=\"#1E2130\", width=190, height=150,highlightthickness=0)\n fuelCanvas.grid(row=row, column=col,rowspan=rowspan,columnspan=colspan)\n global fuelCImg\n fuelCImg = fuelCanvas.create_image(100,150, anchor=\"s\",image=fuelImg)\n self.__addTitleToImage(\"Fuel level\", row-1, col)\n\n def __setCoolantLevel(self,row,col,rowspan,colspan):\n global CoolantImg\n CoolantImg = ImageTk.PhotoImage(Image.open(\"fluid50T.png\"))\n global coolantCanvas\n coolantCanvas = tk.Canvas(self, bg=\"#1E2130\", width=190, height=150,highlightthickness=0)\n coolantCanvas.grid(row=row, column=col,rowspan=rowspan,columnspan=colspan)\n global coolantContainer\n coolantContainer = coolantCanvas.create_image(100,150, anchor=\"s\",image=CoolantImg)\n self.__addTitleToImage(\"Coolant level\", row-1, col)\n\n def __addTitleToImage(self,text,row,col):\n label = tk.Label(self,text=text,bg=\"#1E2130\",fg='white',font=(\"Open sans\", 15))\n label.grid(row=row,column=col,sticky=\"s\")\n\n def __setEngineTemp(self,row,col,rowspan,colspan):\n speedometer = self.__createFigure(0)\n speedometer.write_image(\"assets/temp.png\")\n engineTempImg = Image.open(\"assets/temp.png\")\n engineTempImg = engineTempImg.resize((210,150))\n engineTempImg = ImageTk.PhotoImage(engineTempImg)\n global engineCanvas\n engineCanvas = tk.Canvas(self, bg=\"#1E2130\", width=210, height=100,highlightthickness=0)\n engineCanvas.grid(row=row, column=col,columnspan=colspan,rowspan=rowspan)\n # keep a handle to this canvas image item so updateEngineTemp() can reconfigure it\n global engineTempItem\n engineTempItem = engineCanvas.create_image(100,115, anchor=\"s\",image=engineTempImg)\n engineCanvas.imgref = engineTempImg\n self.__addTitleToImage(\"Engine temperature\", row-1, col)\n\n def __setSpeedometer(self,row,col,rowspan,colspan):\n global speedometer\n speedometer = self.__createSpeedGaugeFigure(0)\n speedometer.write_image(\"speed.png\")\n global img\n img = Image.open(\"speed.png\")\n img = img.resize((210,150))\n img = ImageTk.PhotoImage(img)\n \n canvas = tk.Canvas(self, bg=\"#1E2130\", width=210, height=150,highlightthickness=0)\n canvas.grid(row=row, column=col,columnspan=colspan,rowspan=rowspan)\n canvas.create_image(100,115, anchor=\"s\",image=img)\n canvas.imgref = img\n self.__addTitleToImage(\"SPEED\", row-1, col)\n\n def __createFigure(self,temp):\n lay = go.Layout(\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)'\n )\n fig = go.Figure(go.Indicator(\n domain = {'x': [0, 1], 'y': [0, 1]},\n value = temp,\n mode = \"gauge+number\",\n number = {'font': {'size': 150}},\n gauge = {'axis': {'range': [-20, 120], 'tickwidth': 1,'tickcolor': \"black\"},\n 'bar': {'color': \"MidnightBlue\"},#MidnightBlue\"},\n 'steps' : [\n {'range': [-20, 40], 'color': \"Blue\"},\n {'range': [40, 80], 'color': \"Green\"},\n {'range': [80, 95], 'color': \"Orange\"},\n {'range': [95, 120], 'color': \"Red\"}]}),\n layout=lay)\n return fig\n\n def __createSpeedGaugeFigure(self,speed):\n lay = go.Layout(\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)'\n )\n speedfig = go.Figure(go.Indicator(\n mode = \"number\",\n value = speed,\n number= {'font': {'size': 150}},\n domain = {'row': 0, 'column': 1},\n #domain = {'x': [0, 1], 'y': [0, 1]},\n #gauge = {'axis': {'range': [None, 160]}}\n ),\n layout=lay)\n return speedfig\n\n def fuel100(self,fuelValue):\n fuelImg = Image.open(\"fluid50DT.png\")\n fuelImg = ImageTk.PhotoImage(fuelImg)\n fuelCanvas.imgref = fuelImg\n fuelCanvas.itemconfig(fuelCImg,image = fuelImg)\n self.changeFuelText(fuelValue)\n\n def 
fuel50(self,fuelValue):\n fuelImg = Image.open(\"fluid50DT.png\")\n fuelImg = ImageTk.PhotoImage(fuelImg)\n fuelCanvas.imgref = fuelImg\n fuelCanvas.itemconfig(fuelCImg,image = fuelImg)\n self.changeFuelText(fuelValue)\n\n def fuel25(self,fuelValue):\n fuelImg = Image.open(\"fluid50DT.png\")\n fuelImg = ImageTk.PhotoImage(fuelImg)\n fuelCanvas.imgref = fuelImg\n fuelCanvas.itemconfig(fuelCImg,image = fuelImg)\n self.changeFuelText(fuelValue)\n\n def fuel0(self,fuelValue):\n fuelImg = Image.open(\"fluid0DT.png\")\n fuelImg = ImageTk.PhotoImage(fuelImg)\n fuelCanvas.imgref = fuelImg\n fuelCanvas.itemconfig(fuelCImg,image = fuelImg)\n self.changeFuelText(fuelValue)\n\n def changeFuelText(self,fuelvalue):\n fuelText.set(\"Fuel: \" + fuelvalue + \"%\")\n\n def empty(self):\n emptyFuel = Image.open(\"fluid0T.png\")\n emptyFuel = ImageTk.PhotoImage(emptyFuel)\n fuelCanvas.imgref = emptyFuel\n fuelCanvas.itemconfig(fuelCImg,image = emptyFuel)\n\n def fullCoolant(self):\n fullCoolant = Image.open(\"coolantHigh.png\")\n fullCoolant = ImageTk.PhotoImage(fullCoolant)\n coolantCanvas.imgref = fullCoolant\n coolantCanvas.itemconfig(coolantContainer,image = fullCoolant)\n\n def emptyCoolant(self):\n emptyCoolant = Image.open(\"coolantLow.png\")\n emptyCoolant = ImageTk.PhotoImage(emptyCoolant)\n coolantCanvas.imgref = emptyCoolant\n coolantCanvas.itemconfig(coolantContainer,image = emptyCoolant)\n\n def moderateCoolant(self):\n moderateCoolant = Image.open(\"fluidOKT.png\")\n moderateCoolant = ImageTk.PhotoImage(moderateCoolant)\n coolantCanvas.imgref = moderateCoolant\n coolantCanvas.itemconfig(coolantContainer,image = moderateCoolant)\n\n def updateEngineTemp(self,engineTemp):\n speedometer = self.__createFigure(engineTemp)\n speedometer.write_image(\"assets/temp.png\")\n engineTempImg = Image.open(\"assets/temp.png\")\n engineTempImg = engineTempImg.resize((210,150))\n engineTempImg = ImageTk.PhotoImage(engineTempImg)\n engineCanvas.imgref = engineTempImg\n engineCanvas.itemconfig(engineTempItem,image = engineTempImg)\n\n def updateAllTemps(self,allTemps):\n outdoortemp.set(\"Outdoor Temp: \" + allTemps +\" C\\N{DEGREE SIGN}\")","repo_name":"Ztolpotato/motorhome_application","sub_path":"main/gui/engineSensorsWindow.py","file_name":"engineSensorsWindow.py","file_ext":"py","file_size_in_byte":9331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"6217684810","text":"# -*- coding: utf-8 -*-\n\nimport configparser\nimport logging\nimport vertica_python\nfrom neo4j import GraphDatabase\nimport csv\nimport paramiko\n# import SqlExtracter as sqlex\n# import KettleClass as kcs\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(lineno)d - %(levelname)s - %(message)s')\n\n# 获取vertica连接配置\nlogging.info('load db config')\nconfig = configparser.ConfigParser()\nconfig.read('E:\\\\MySelfcode\\\\h3c\\\\neo4j\\\\access.conf', encoding='utf-8')\nvertica_config = dict(config.items('vertica'))\nvertica_config['backup_server_node'] = vertica_config['backup_server_node'].split(',')\n\n\nwith vertica_python.connect(**vertica_config) as connection:\n logging.info('get connection with vertica')\n ip = \"10.90.15.10\"\n port = 22\n user = \"root\"\n password = \"MFmg5pmQsSeK\"\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip, port, user, password)\n # file_name = ssh.exec_command(\"/opt/neo4j/import/relation.csv\")\n file_name='E:\\\\MySelfcode\\\\h3c\\\\neo4j\\\\relation.csv'\n with open(file_name, 
'w', encoding='utf-8', newline='\\n') as f:\n cur = connection.cursor()\n head=[]\n write = csv.writer(f)\n\n count=cur.execute(\"\"\"SELECT id,\n parent_id \n FROM cms.CRM_T_COMPANY\n WHERE dw_status='A'\"\"\")\n for index in count.description:\n head.append(index[0])\n head = tuple(head)\n write.writerow(head)\n while True:\n rows = cur.fetchmany(10000)\n if not rows: break\n for row in rows:\n write.writerow(row)\n logging.info(\"文件创建完成\")\n uri = \"bolt://10.90.15.10:7687\"\n driver = GraphDatabase.driver(uri, auth=(\"neo4j\", \"admin\"))\n with driver.session() as session:\n tx = session.begin_transaction()\n sftp = ssh.open_sftp()\n sftp.put(r'E:\\\\MySelfcode\\\\h3c\\\\neo4j\\\\relation.csv','/opt/neo4j/import/relation.csv')\n tx.run('LOAD CSV WITH HEADERS FROM \"file:///relation.csv\" AS line'\n ' match(c1:customer{id:line.id}),(c2:customer{id:line.parent_id}) '\n 'merge (c1)-[r:客户父子]->(c2)')\n\nsession.close()\n\n\n","repo_name":"Joseph025/joseph","sub_path":"01-pyspark/05_pythonAPI连接linux_neo4j_load_csv.py","file_name":"05_pythonAPI连接linux_neo4j_load_csv.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"23039083247","text":"'''\nThe data structure representing a Bubble.\n'''\nclass Bubble:\n def __init__(self, bubble_num):\n self.num = bubble_num\n self.paths = {}\n\n def add_path(self, taxon, path):\n self.paths[taxon] = path\n\n def __str__(self):\n out = ''\n for taxon, path in self.paths.items():\n out += '> ' + taxon + '\\n'\n out += str(path) + '\\n'\n return out\n\n def to_expanded_str(self):\n out = ''\n inv_paths = {}\n for taxon, path in self.paths.items():\n inv_paths[path] = inv_paths.get(path, [])\n inv_paths[path].append(taxon)\n for path, taxa in inv_paths.items():\n out += '> bubble ' + str(self.num) + ' for ' + ', '.join(taxa) + '\\n'\n out += str(path) + '\\n'\n return out\n","repo_name":"Colelyman/kleuren","sub_path":"scripts/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"97"} +{"seq_id":"14886129078","text":"import torch\r\nfrom torch.utils.data import Dataset\r\nimport numpy as np\r\nimport os\r\nimport re\r\nfrom PIL import Image\r\n\r\n\r\nclass SemanticSegmentationDataset(Dataset):\r\n def __init__(self, \r\n img_paths, \r\n mask_paths=None,\r\n size=(256, 256),\r\n mode='binary',\r\n normalize=None):\r\n \"\"\"\r\n Example semantic segmentation Dataset class.\r\n Run once when instantiating the Dataset object.\r\n If you want to use it for binary semantic segmentation, \r\n please select the mode as 'binary'. For multi-class, enter 'multi'.\r\n example_data/\r\n └── /images/\r\n └── 0001.png\r\n └── 0002.png\r\n └── 0003.png\r\n └── ...\r\n /masks/\r\n └── 0001_mask.png\r\n └── 0002_mask.png\r\n └── 0003_mask.png\r\n └── ...\r\n img_paths : str\r\n The file path indicating the main directory that contains only images.\r\n mask_paths : str, default=None\r\n The file path indicating the main directory that contains only \r\n ground truth images.\r\n size : tuple, default=(256, 256)\r\n Enter the (width, height) values into a tuple for resizing the data.\r\n mode : str, default='binary'\r\n Choose how the DataSet object should generate data. \r\n Enter 'binary' for binary masks.\r\n normalize : orchvision.transforms.Normalize, default=None\r\n Normalize a tensor image with mean and standard deviation. 
\r\n This transform does not support PIL Image.\r\n \"\"\"\r\n self.img_paths = self._get_file_dir(img_paths)\r\n self.mask_paths = self._get_file_dir(mask_paths) if mask_paths is not None else mask_paths\r\n self.size = size\r\n self.mode = mode\r\n self.normalize = normalize\r\n \r\n def __len__(self):\r\n \"\"\"\r\n Returns the number of samples in our dataset.\r\n Returns\r\n ------- \r\n num_datas : int \r\n Number of datas.\r\n \"\"\"\r\n return len(self.img_paths)\r\n \r\n def __getitem__(self, index):\r\n \"\"\"\r\n Loads and returns a sample from the dataset at \r\n the given index idx. Based on the index, it \r\n identifies the image’s location on disk, \r\n converts that to a tensor using read_image, \r\n retrieves the corresponding label from the \r\n ground truth data in self.mask_paths, calls the transform \r\n functions on them (if applicable), and returns \r\n the tensor image and corresponding label in a tuple.\r\n Returns\r\n ------- \r\n img, mask : torch.Tensor\r\n The transformed image and its corresponding \r\n mask image. If the mask path is None, it \r\n will only return the transformed image.\r\n output_shape_mask: (batch_size, 1, img_size, img_size)\r\n output_shape_img: (batch_size, 3, img_size, img_size)\r\n \"\"\"\r\n img_path = self.img_paths[index]\r\n img = Image.open(img_path).convert(\"RGB\")\r\n img = img.resize((self.size[0], self.size[1])) \r\n img = torch.Tensor(np.array(img, dtype=np.uint8).transpose((2, 0, 1)))\r\n if self.mask_paths is not None:\r\n mask_path = self.mask_paths[index]\r\n mask = Image.open(mask_path)\r\n mask = mask.resize((self.size[0], self.size[1])) \r\n mask = np.array(mask)\r\n \r\n if self.mode == 'binary':\r\n mask = self._binary_mask(mask)\r\n else: \r\n mask = self._multi_class_mask(mask)\r\n\r\n mask = torch.as_tensor(mask, dtype=torch.uint8)\r\n if self.normalize: img = self.normalize(img)\r\n return img, mask\r\n else:\r\n if self.normalize: img = self.normalize(img)\r\n return img\r\n\r\n def _multi_class_mask(self, mask):\r\n obj_ids = np.unique(mask)[1:]\r\n masks = mask == obj_ids[:, None, None]\r\n return masks\r\n\r\n def _binary_mask(self, mask):\r\n mask[:, :][mask[:, :] >= 1] = 1\r\n mask[:, :][mask[:, :] < 1] = 0\r\n mask = np.expand_dims(mask, axis=0)\r\n return mask\r\n\r\n def _get_file_dir(self, directory):\r\n \"\"\"\r\n Returns files in the entered directory.\r\n Parameters\r\n ----------\r\n directory : string\r\n File path.\r\n Returns\r\n -------\r\n directories: list\r\n All files in the directory.\r\n \"\"\"\r\n def atoi(text):\r\n return int(text) if text.isdigit() else text\r\n \r\n def natural_keys(text):\r\n return [atoi(c) for c in re.split('(\\d+)',text)]\r\n\r\n for roots,dirs,files in os.walk(directory): \r\n if files:\r\n directories = [roots + os.sep + file for file in files]\r\n directories.sort(key=natural_keys)\r\n\r\n return directories","repo_name":"mberkay0/pretrained-backbones-unet","sub_path":"backbones_unet/utils/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"97"} +{"seq_id":"21935820100","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\n\r\nurlpatterns = [\r\n path('getRoutes',views.getRoutes,name='getRoutes'),\r\n path('signup',views.signUp,name='signUp'),\r\n path('login',views.login,name='login'),\r\n path('getProductsList',views.getProductsList,name='getProductsList'),\r\n path('addToCart',views.addToCart,name='addToCart'),\r\n path('getCartItems',views.getCartItems,name='getCartItems'),\r\n path('removeFromCart',views.removeFromCart,name='removeFromCart'),\r\n path('processOrder',views.processOrder,name='processOrder'),\r\n path('getItem',views.getItem,name='getItem'),\r\n]","repo_name":"Harsh-2163/ECommerceSite-by-django-and-react","sub_path":"ECommerceWEB/API/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"32747755977","text":"import os\nimport json\nfrom functools import partial\nimport subprocess\nfrom itertools import filterfalse\nfrom typing import Callable, Iterable, List, Type\n\nimport pytest\n\nfrom ocrd_butler.util import logger\nfrom ocrd_butler.config import Config\nfrom ocrd_butler.database.models import (\n db,\n Workflow,\n Task,\n)\n\n\n\ndef load_mock_db_model(\n model_class: Type[db.Model], root_node: str, filename: str\n) -> Iterable:\n \"\"\" load example data from JSON file and instantiate model instances\n \"\"\"\n with open(filename, 'r') as f:\n for i, data in enumerate(json.load(f)[root_node]):\n _id = data.pop('id', None)\n model_instance = model_class.create(**data)\n model_instance.id = _id or i + 1\n yield model_instance\n\n\nload_mock_workflows = partial(load_mock_db_model, Workflow, 'workflows')\nload_mock_tasks = partial(load_mock_db_model, Task, 'tasks')\n\n\ndef test_profile_active() -> bool:\n \"\"\" determine if in ``TEST`` profile, based on ``PROFILE`` env var.\n \"\"\"\n return os.environ.get('PROFILE', '').lower() == 'test'\n\n\ndef ocrd_processor_available(processor: str) -> bool:\n \"\"\" checks whether or not processor executable can be found in environment,\n and is explicitly included in configuration.\n \"\"\"\n if processor in Config.PROCESSORS:\n try:\n return subprocess.check_output(\n ['which', processor]\n ) is not None\n except Exception:\n logger.error(\n 'No executable found for processor `%s`', processor\n )\n return False\n\n\ndef require_ocrd_processors(*processors: List[str]) -> Callable:\n \"\"\" decorator for tests that require specific OCRD processor executables.\n Translates to ``@pytest.mark.skipif`` decorator for allowing or skipping\n the test depending on the availability of *all* OCRD executables\n specified via parameter list. If no parameters are given, then *all*\n processors listed in the ``PROCESSORS`` attribute of\n :class:`~ocrd_butler.config.Config` must be available, otherwise the test\n will be skipped.\n \"\"\"\n if len(processors) < 1:\n processors = Config.PROCESSORS\n not_available = list(\n filterfalse(\n ocrd_processor_available,\n processors\n )\n )\n\n def decorator(f) -> Callable:\n \"\"\" decorates given function with a ``pytest.mark.skipif`` decoration\n indicating whether any of the required OCRD processor executables\n are missing in the current environment, i.e. whether the function\n (i.e. 
test case) can't possibly succeed and must hence be skipped.\n \"\"\"\n return pytest.mark.skipif(\n len(not_available) > 0,\n reason='Required OCRD processor(s) `{}` not found.'.format(\n ', '.join(not_available)\n )\n )(f)\n return decorator\n\n\nskip_in_test_profile = partial(\n pytest.mark.skipif(\n test_profile_active(),\n reason='Test not supposed to be run in `TEST` profile.'\n )\n)\n","repo_name":"StaatsbibliothekBerlin/ocrd_butler","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"97"} +{"seq_id":"34642368707","text":"# !/usr/bin/python\n# -*-coding: utf-8 -*-\ndef maxProfit1(values):\n if len(values) < 2: return 0\n income = values[1] - values[0]\n res = income\n for i in range(2, len(values)):\n income = values[i] - values[i-1] + (income if income > 0 else 0)\n res = max(income, res)\n return max(res, 0)\n\nif __name__ == '__main__':\n print(maxProfit1([7,1,5,3,6,4]))","repo_name":"ladlod/my_leetcode","sub_path":"dp/leetcode121.py","file_name":"leetcode121.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"37747139799","text":"class Solution:\n def firstBadVersion(self, n: int) -> int:\n start, end = 1, n\n while start < end:\n mid = start + (end - start) // 2\n if isBadVersion(mid):\n end = mid\n else:\n start = mid + 1\n return start\n","repo_name":"ayushmangarg2003/DSA-LeetCode","sub_path":"LeetcodePython/278.py","file_name":"278.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"97"} +{"seq_id":"11133154056","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom tools.models import Feature, Recommend\nfrom tools.serializers import TooPerfSerializer\nfrom tools.utils import get_top\n\n\nclass FindToolView(APIView):\n # authentication_classes =\n def post(self, request, *args, **kwargs):\n ser = TooPerfSerializer(data=self.request.data)\n ser.is_valid()\n ids = ser.validated_data['features']\n feat = Feature.objects.filter(id__in=ids).values_list('name', flat=True)\n tool_ids = get_top(feat)\n\n request.user.recommends.all().delete()\n new_recs = []\n for feat in ids:\n for tool in tool_ids:\n new_recs.append(\n Recommend(\n user=request.user,\n tool_id=tool,\n feature_id=feat\n )\n )\n # new_recs = [\n # [\n # Recommend(\n # user=request.user,\n # tool_id=tool,\n # feature_id=feat\n # )\n # for tool in tool_ids\n # ]\n # for feat in ids\n # ]\n Recommend.objects.bulk_create(new_recs)\n return Response(tool_ids, status=status.HTTP_200_OK)\n","repo_name":"erfanmoshiri/tool_recommender","sub_path":"tools/views/find_tool_view.py","file_name":"find_tool_view.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"13432097991","text":"n=int(input())\ngraph=[0]\nfor i in range(n):\n graph.append(int(input()))\nif n>=3:\n dp=[0 for _ in range(n+1)]\n dp[1]=graph[1]\n dp[2]=max(dp[1]+graph[2],graph[2])\n for i in range(3,n+1):\n dp[i]=max(dp[i-2],max(dp[:i-2])+graph[i-1])+graph[i]\n print(max(dp))\nif n<=2:\n print(sum(graph))","repo_name":"persShins/DataStructure","sub_path":"동적계획법/백준 2156.py","file_name":"백준 
2156.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"31437532297","text":"from core.models import Pair\nfrom orders.models import Order\nfrom ticker.tests.base import TickerBaseTestCase\nfrom rest_framework.test import APIClient\nfrom support.models import Support\n\n\nclass BaseCoreApiTestCase(TickerBaseTestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.ENABLED_TICKER_PAIRS = ['BTCLTC']\n super(BaseCoreApiTestCase, cls).setUpClass()\n cls.pair = Pair.objects.get(name='BTCLTC')\n cls.api_client = APIClient()\n\n def _create_order_api(self, amount_base=3, token=None):\n order_data = {\n \"amount_base\": amount_base,\n \"pair\": {\n \"name\": self.pair.name\n },\n \"withdraw_address\": {\n \"address\": \"17dBqMpMr6r8ju7BoBdeZiSD3cjVZG62yJ\"\n }\n }\n if token is not None:\n self.api_client.credentials(\n Authorization=\"Bearer {}\".format(token)\n )\n order_api_url = '/en/api/v1/orders/'\n response = self.api_client.post(\n order_api_url, order_data, format='json').json()\n order = Order.objects.get(\n unique_reference=response['unique_reference']\n )\n token = response['token']\n return order, token\n\n def _create_support_ticket_api(self, msg, token=None):\n support_data = {\n 'email': '{}@emil.com'.format(msg),\n 'name': msg,\n 'message': msg,\n 'subject': msg,\n }\n if token is not None:\n self.api_client.credentials(\n Authorization=\"Bearer {}\".format(token)\n )\n support_api_url = '/en/api/v1/support/'\n response = self.api_client.post(\n support_api_url, support_data, format='json').json()\n support = Support.objects.get(\n unique_reference=response['unique_reference']\n )\n return support\n","repo_name":"hossamelneily/nexchange","sub_path":"accounts/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"33236953946","text":"import logging\n\nfrom lxml import etree\n\nfrom .base import Parser\n\nlogger = logging.getLogger(f\"{__name__}\")\n\n\nclass EurovaistineParser(Parser):\n def __init__(self):\n super().__init__()\n self._base_url = \"https://www.eurovaistine.lt/\"\n self._type = \"Eurovaistine\"\n\n def _parse_url(self, el: etree.Element) -> str:\n xpath = \".//@href\"\n url = el.xpath(xpath)\n if url:\n return f\"{self._base_url}{url[0]}\"\n\n return \"\"\n\n def _parse_title(self, el: etree.Element) -> str:\n xpath = \".//div[@class='title']//text()\"\n title = el.xpath(xpath)\n if title:\n return title[0].strip()\n\n return \"\"\n\n def _parse_price(self, el: etree.Element) -> str:\n xpath = \".//div[contains(@class, 'oldProductPrice')]//text()\"\n price = el.xpath(xpath)\n\n if not price:\n xpath = \".//div[contains(@class, 'productPrice')]//text()\"\n price = el.xpath(xpath)\n\n if price:\n return price[0].strip()\n\n return \"\"\n\n def _parse_discounted_price(self, el: etree.Element) -> str:\n xpath = \".//div[contains(@class, 'oldProductPrice')]//text()\"\n price = el.xpath(xpath)\n\n if price:\n xpath = \".//div[contains(@class, 'productPrice')]//text()\"\n price = el.xpath(xpath)[0].strip()\n else:\n price = \"NotAvailable\"\n\n return price\n\n def parse(self, body: str) -> list[dict[str, str]]:\n tree = etree.HTML(body)\n products = tree.xpath(\n \".//div[contains(@class, 'product-list clearfix')]\"\n \"//*[@class='productCard']\"\n )\n logger.debug(f\"Found {len(products)} products in page.\")\n\n results = []\n for 
product in products:\n try:\n results.append(\n {\n \"url\": self._parse_url(product),\n \"title\": self._parse_title(product),\n \"price\": self._parse_price(product),\n \"discounted_price\": self._parse_discounted_price(product),\n \"type\": self._type,\n }\n )\n except Exception as e:\n logger.error(f\"Exception when parsing product: {e}\")\n\n return results\n","repo_name":"punksnotbread/shampoo_bot","sub_path":"src/shampoo_scraper/parsers/eurovaistine.py","file_name":"eurovaistine.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"41190541553","text":"import cv2\nimport cv2.cv as cv\nimport webbrowser\nfrom cv2.cv import *\n\nimageSize = \\\n[\n (160, 120),\n (320, 240),\n (424, 240),\n (640, 360),\n (800, 448),\n (960, 544),\n (1280, 768),\n (1920 , 1080),\n (3840 , 2160)\n]\n\ntitle='camera'\n\ncv.NamedWindow(title, cv.CV_WINDOW_AUTOSIZE)\ncapture = cv.CaptureFromCAM(0)\n\ncv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)\ncv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT,360)\n\ncv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FORMAT, cv.IPL_DEPTH_32F)\n\n# print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)\n# print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)\n\n\ncascade = cv.Load(r'haarcascade_frontalface_alt.xml') \n\nwhile True:\n img = cv.QueryFrame(capture) \n\n storage = cv.CreateMemStorage(0) \n faces = cv.HaarDetectObjects(img, cascade, storage, 1.2, 2, CV_HAAR_DO_CANNY_PRUNING, (100,100))\n\n for (x, y, w, h), n in faces:\n cv.Rectangle(img, (x, y), (x + w,y + h), 255)\n \n cv.ShowImage(title, img)\n key = cv.WaitKey(100)\n\n if key == 32 : # Space\n cv.SaveImage(\"captured.png\", img)\n webbrowser.open_new_tab(\"captured.png\") \n \n elif key == 27: # Escape \t\n break\n\ncv.DestroyWindow(title)","repo_name":"Davidigest/pyWebCam","sub_path":"Tests/TestCapture.py","file_name":"TestCapture.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"39643565546","text":"#!/usr/bin/env python\n\nimport sys\nimport json\n\nin_fn = sys.argv[1]\nout_fn = sys.argv[2]\n\n#\n# https://stackoverflow.com/a/70154903/19410\n#\nclass RoundingFloat(float):\n __repr__ = staticmethod(lambda x: format(x, '.6f'))\njson.encoder.c_make_encoder = None\njson.encoder.float = RoundingFloat\n\nHD = {\n 'VN': '0.1',\n 'SO': 'coordinate',\n 'GO': 'none',\n}\n\nSQ = [\n {\n 'SN': 'chr1',\n 'LN': 248956422,\n },\n {\n 'SN': 'chr10',\n 'LN': 133797422,\n },\n {\n 'SN': 'chr11',\n 'LN': 135086622,\n },\n {\n 'SN': 'chr12',\n 'LN': 133275309,\n },\n {\n 'SN': 'chr13',\n 'LN': 114364328,\n },\n {\n 'SN': 'chr14',\n 'LN': 107043718,\n },\n {\n 'SN': 'chr15',\n 'LN': 101991189,\n },\n {\n 'SN': 'chr16',\n 'LN': 90338345,\n },\n {\n 'SN': 'chr17',\n 'LN': 83257441,\n },\n {\n 'SN': 'chr18',\n 'LN': 80373285,\n },\n {\n 'SN': 'chr19',\n 'LN': 58617616,\n },\n {\n 'SN': 'chr2',\n 'LN': 242193529,\n },\n {\n 'SN': 'chr20',\n 'LN': 64444167,\n },\n {\n 'SN': 'chr21',\n 'LN': 46709983,\n },\n {\n 'SN': 'chr22',\n 'LN': 50818468,\n },\n {\n 'SN': 'chr3',\n 'LN': 198295559,\n },\n {\n 'SN': 'chr4',\n 'LN': 190214555,\n },\n {\n 'SN': 'chr5',\n 'LN': 181538259,\n },\n {\n 'SN': 'chr6',\n 'LN': 170805979,\n },\n {\n 'SN': 'chr7',\n 'LN': 159345973,\n },\n {\n 'SN': 'chr8',\n 'LN': 145138636,\n },\n {\n 'SN': 'chr9',\n 'LN': 138394717,\n },\n {\n 'SN': 'chrM',\n 'LN': 
16569,\n },\n {\n 'SN': 'chrX',\n 'LN': 156040895,\n },\n {\n 'SN': 'chrY',\n 'LN': 57227415,\n },\n]\n\ndef blocks_to_cigar(n_blocks, sizes_str, offsets_str):\n sizes = sizes_str.split(',')\n offsets = offsets_str.split(',')\n res = []\n i = 0\n acc = 0\n for offset, size in zip(offsets, sizes):\n # sys.stderr.write('i {} | offset {} | size {}\\n'.format(i, offset, size))\n if i == 0:\n res.append(size)\n res.append('M')\n acc = int(size)\n else:\n N = int(offset) - acc\n if N != 0:\n res.append(str(N))\n res.append('N')\n acc += N\n res.append(str(size))\n res.append('M')\n acc += int(size)\n else:\n acc += int(size)\n res[-2] = str(int(res[-2]) + int(size))\n i += 1\n # sys.stderr.write('res {}\\n'.format(res))\n return res\n\nwith open(out_fn, 'w') as out_fh:\n #\n # header\n #\n HD_ln_elems = ['\\t'.join(['@HD', 'VN:{}'.format(HD['VN']), 'SO:{}'.format(HD['SO']), 'GO:{}'.format(HD['GO'])]), '']\n HD_ln = '\\n'.join(HD_ln_elems)\n out_fh.write(HD_ln)\n SQ_ln_elems = ['\\t'.join(['@SQ', 'SN:{}'.format(sq['SN']), 'LN:{}'.format(sq['LN'])]) for sq in SQ] + ['']\n SQ_ln = '\\n'.join(SQ_ln_elems)\n out_fh.write(SQ_ln)\n line_idx = 0\n with open(in_fn, 'r') as in_fh:\n for line in in_fh:\n elems = line.rstrip().split('\\t')\n #\n # https://genome.ucsc.edu/FAQ/FAQformat.html#format1\n #\n line = {\n 'chrom': elems[0],\n 'chromStart': int(elems[1]),\n 'chromEnd': int(elems[2]),\n 'name': elems[3],\n 'score': int(elems[4]),\n 'strand': elems[5],\n 'thickStart': int(elems[6]),\n 'thickEnd': int(elems[7]),\n 'itemRgb': elems[8],\n 'blockCount': int(elems[9]),\n 'blockSizes': elems[10],\n 'blockStarts': elems[11],\n }\n [dhs_id, dhs_score, dhs_n] = line['name'].split('|')\n opt_co_obj = {\n 'summit': {'start':line['thickStart'],'end':line['thickEnd']},\n 'dhs': {'id':dhs_id, 'score':float(dhs_score), 'n':int(dhs_n)},\n 'rgb': line['itemRgb'],\n 'rgbf': [float(x)/255 for x in line['itemRgb'].split(',')],\n 'ucscScore': line['score'],\n }\n #\n # https://samtools.github.io/hts-specs/SAMv1.pdf\n #\n QNAME = dhs_id\n FLAG = str(0)\n RNAME = line['chrom']\n POS = str(line['chromStart'] + 1)\n MAPQ = str(255)\n CIGAR = ''.join(blocks_to_cigar(line['blockCount'], line['blockSizes'], line['blockStarts']))\n RNEXT = '*'\n PNEXT = str(0)\n TLEN = str(line['chromEnd'] - line['chromStart'])\n SEQ = '*'\n QUAL = '*'\n OPT = 'CO:Z:{}'.format(json.dumps(opt_co_obj, separators=(',', ':')))\n out_ln_elems = ['\\t'.join([\n QNAME,\n FLAG,\n RNAME,\n POS,\n MAPQ,\n CIGAR,\n RNEXT,\n PNEXT,\n TLEN,\n SEQ,\n QUAL,\n OPT,\n ])] + ['']\n out_ln = '\\n'.join(out_ln_elems)\n out_fh.write(out_ln)\n line_idx += 1\n # if line_idx == 1:\n # sys.exit(0)","repo_name":"alexpreynolds/dhs-bed12-to-bam","sub_path":"bed12_to_sam.py","file_name":"bed12_to_sam.py","file_ext":"py","file_size_in_byte":5380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"1728752650","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.profile, name='profile'),\n path('admin/', views.admin_profile, name='admin_profile_page'),\n path('order_history/<order_number>', views.order_history, name='order_history'),\n path('profile', views.profile, name='profile'),\n path('order_history/<order_number>', views.order_history, name='order_history'),\n path('download/<download_id>', views.counting_downloads, name='counting_downloads'),\n path('delete/<photo_id>', views.delete_user_photo, name='delete_user_photo'),\n path('upload/<int:product_id>/', views.image_upload, name='image_upload'),\n path('profile_upload/', views.image_upload_from_profile, name='image_profile_upload'),\n]\n","repo_name":"cgpalmer/ms4","sub_path":"profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"33099747807","text":"from sys import maxsize\r\nc, n = map(int,input().split())\r\nff = []\r\nres = 0\r\nfor i in range(n):\r\n temp = list(map(int,input().split()))\r\n ff.append(temp)\r\ndp = [0] +[maxsize] * (c+100)\r\nfor cost, customer in ff:\r\n for cur_customer in range(customer, c + 101):\r\n dp[cur_customer] = min(dp[cur_customer], dp[cur_customer - customer] + cost)\r\n\r\nprint(min(dp[c:c + 101]))","repo_name":"Toritae/study","sub_path":"leetcode/bj1106.py","file_name":"bj1106.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"17669741109","text":"\"\"\"\nCapitalize each item of a list and update the list\n\"\"\"\nnames = ['charlotte hippopotamus turner', 'oliver st. john-mollusc',\n 'nigel incubator-jones', 'philip diplodocus mallory']\nprint(names)\n\nfor index in range(len(names)) :\n names[index] = names[index].title()\n\nprint(names)\n\n\n\"\"\"\nCreate Html list\n\"\"\"\ndef html_list(str_list):\n start_item_tag = \"<li>\"\n end_item_tag = \"</li>\"\n list_body = \"\"\n for each_str in str_list:\n list_body += start_item_tag + str(each_str) + end_item_tag + \"\\n\"\n return \"<ul>\\n\" + list_body + \"</ul>\"\n\nprint(html_list([\"strings\", 2.0, True, \"and other types too!\"]))\n\n\n\"\"\"\nSquare number\n\"\"\"\ndef square_num(limit):\n answer = 0\n while (answer+1)**2 < limit:\n answer += 1\n return answer**2\n\nprint(square_num(89))\n\n\n\"\"\"\nCargo problem\n\"\"\"\n# each item in the manifest is an item and its weight\nmanifest = [[\"bananas\", 15], [\"mattresses\", 34], [\"dog kennels\",42], [\"machine that goes ping!\", 120], [\"tea chests\", 10], [\"cheeses\", 0]]\n\ndef cargo_problem(manifest, weight_limit):\n weight_reached = 0\n for item in manifest:\n if weight_limit >= weight_reached + item[1]:\n weight_reached += item[1]\n else:\n break\n return weight_reached\n\nprint(cargo_problem(manifest, 52))\n\n\n\"\"\"\nCreate String from list exactly 140 char long\n\"\"\"\nheadlines = [\"Local Bear Eaten by Man\",\n \"Legislature Announces New Laws\",\n \"Peasant Discovers Violence Inherent in System\",\n \"Cat Rescues Fireman Stuck in Tree\",\n \"Brave Knight Runs Away\",\n \"Papperbok Review: Totally Triffic\"]\n\nnews_ticker = \"\"\n\nfor headline in headlines:\n news_ticker += headline + \" \"\n if len(news_ticker) >= 140:\n news_ticker = news_ticker[:140]\n break\n\nprint(len(news_ticker))\n\n\n\"\"\"\nRefactor code\n\"\"\"\n# Orignal code\ndef check_answers(my_answers,answers):\n \"\"\"\n Checks the five answers provided to a multiple choice quiz 
and returns the results.\n \"\"\"\n results= [None, None, None, None, None]\n if my_answers[0] == answers[0]:\n results[0] = True\n elif my_answers[0] != answers[0]:\n results[0] = False\n if my_answers[1] == answers[1]:\n results[1] = True\n elif my_answers[1] != answers[1]:\n results[1] = False\n if my_answers[2] == answers[2]:\n results[2] = True\n elif my_answers[2] != answers[2]:\n results[2] = False\n if my_answers[3] == answers[3]:\n results[3] = True\n elif my_answers[3] != answers[3]:\n results[3] = False\n if my_answers[4] == answers[4]:\n results[4] = True\n elif my_answers[4] != answers[4]:\n results[4] = False\n count_correct = 0\n count_incorrect = 0\n for result in results:\n if result == True:\n count_correct += 1\n if result != True:\n count_incorrect += 1\n if count_correct/5 > 0.7:\n return \"Congratulations, you passed the test! You scored \" + str(count_correct) + \" out of 5.\"\n elif count_incorrect/5 >= 0.3:\n return \"Unfortunately, you did not pass. You scored \" + str(count_correct) + \" out of 5.\"\n\nprint(check_answers(['a', 'a', 'a', 'd', 'e'], ['a', 'b', 'c', 'd', 'e']))\n\n# Refacored code\ndef check_answers_refactored(my_answers, answers):\n correct_count = 0\n for index in range(len(answers)):\n if my_answers[index] == answers[index]:\n correct_count += 1\n if correct_count/len(answers) > 0.7:\n return \"Congratulations, you passed the test! You scored \" + str(correct_count) + \" out of \" + str(len(answers)) + \".\"\n else:\n return \"Unfortunately, you did not pass. You scored \" + str(correct_count) + \" out of \" + str(len(answers)) + \".\"\n\nprint(check_answers_refactored(['a', 'b', 'c', 'a', 'e', 'f', 'g'], ['a', 'b', 'c', 'd', 'e', 'f', 'g']))\n\n\n\"\"\"\nRemoving duplicate\n\"\"\"\ndef remove_duplicates(list_wtih_duplicates):\n new_list = []\n contains = False\n for index in range(len(list_wtih_duplicates)):\n for index_new_list in range(len(new_list)):\n if new_list[index_new_list] == list_wtih_duplicates[index]:\n contains = True\n break\n if not contains:\n new_list.append(list_wtih_duplicates[index])\n else:\n contains = False\n return new_list\n\nprint(remove_duplicates(['john', 'lucy', 'anivia', 'john', 'james', 'harry']))\n\n\n\"\"\"\nRemoving duplicate refactor\n\"\"\"\ndef remove_duplicates_refactor(source):\n target = []\n for element in source:\n if element not in target:\n target.append(element)\n return target\n\nprint(remove_duplicates_refactor(['john', 'lucy', 'anivia', 'john', 'james', 'harry']))\n\n\n\"\"\"\nRemoving duplicates using a set\n\"\"\"\ndef remove_duplicates_with_set(source):\n source_set = set(source)\n return source_set\n\nprint(remove_duplicates_with_set(['john', 'lucy', 'anivia', 'john', 'james', 'harry']))\n\n\n\"\"\"\nNearest square with sets\n\"\"\"\ndef all_squares_till_limit(limit):\n square_set = set()\n square_num = 0\n while (square_num+1)**2 < limit:\n square_set.add((square_num+1)**2)\n square_num += 1\n return square_set\n\nprint(len(all_squares_till_limit(2000)))\n\n\n\"\"\"\nDictionary\n\"\"\"\npopulation = {'Istanbul': 13.3, 'Karachi': 13.0}\n\nprint(population['Mumbai'])\n","repo_name":"willbrom/python-script","sub_path":"script_1.py","file_name":"script_1.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"3531582623","text":"import os \n\nimport subprocess\n\ndef clear_folder(path):\n if os.path.isdir(path):\n for element in os.listdir(path):\n\n element = os.path.join(path, element)\n\n if 
os.path.isfile(element):\n os.remove(element)\n\n else: \n clear_folder(element)\n\n os.rmdir(path)\n \n else:\n raise OSError(path, \" not a valid Directory\")\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n\n folder = \"attempt01/\"\n abbrev = \"01\"\n\n directory = folder + abbrev\n \n input_file = folder + \"input.txt\"\n coordinate_file = folder + \"coord.txt\"\n trajectories_file = \"coords.traj\"\n\n output_file = directory + \"_output\"\n\n energies_output = directory + \"_energies\"\n\n radial_distr_output = directory + \"_radial_distr\"\n\n \n\n\n print(\"Hello User, welcome to the MD-Atom Simulation. \\nRemember to set the correct files in the main.py file.\\nEnjoy!\\n\")\n\n if(os.path.isdir(directory)):\n clear_folder(directory)\n\n os.system(f\"mkdir {folder}\")\n os.system(\"cmake ..\")\n os.system(\"make\")\n\n \n os.system(f\"./mdatom {input_file} {coordinate_file} > {output_file}\")\n\n os.system(f\"python draw.py {trajectories_file}\")\n \n os.system(f\"python energy.py {output_file} > {energies_output}\")\n\n os.system(f\"python gr.py {output_file} > {radial_distr_output}\")\n\n os.system(f\"python plot.py {radial_distr_output} -r\")\n\n os.system(f\"python plot.py {energies_output} -e\")\n\n \n\n \n\n \n\n \n \n \n","repo_name":"kryo4096/MDAtom","sub_path":"mdatom-master/build/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"23271873239","text":"\nimport sys\nimport glob\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfont = {'size': 18}\nmatplotlib.rc('font', **font)\n\n\ndef average_reward(filename):\n avg_reward = []\n with open(filename) as out:\n for line in out:\n if 'average' in line:\n avg_reward.append(float(line.strip().split()[-1]))\n return avg_reward\n\ndef smoothing(avg_reward):\n window = 20\n smooth_avg_reward = []\n for i in range(len(avg_reward)):\n smoothing_start = max(0, i - window)\n smoothing_end = min(len(avg_reward), i + window)\n smooth_avg_reward.append(np.mean(avg_reward[smoothing_start:smoothing_end]))\n return smooth_avg_reward\n\n\nsmooth_avg_reward_list = []\noutname_list = []\nfor outname in sys.argv[1:]:\n avg_reward = average_reward(glob.glob(outname)[0])\n smooth_avg_reward_list.append(smoothing(avg_reward))\n outname_list.append(outname)\n\nmin_point = min([sm[0] for sm in smooth_avg_reward_list])\n\noffset_list = []\nfor sm in smooth_avg_reward_list:\n offset_list.append(sm[0] - min_point)\n\nsmooth_avg_reward_offset_list = []\nfor i in range(len(smooth_avg_reward_list)):\n curve = [pt - offset_list[i] for pt in smooth_avg_reward_list[i]]\n plt.plot(curve, label=outname_list[i])\n\nplt.legend()\nplt.xlabel('Training frames (unit: 100,000 frames)')\nplt.ylabel('Average episode reward')\nplt.gcf().subplots_adjust(left=0.2)\nplt.gcf().subplots_adjust(bottom=0.15)\nplt.show()\n#~ plt.savefig(game + '_learning_curve.pdf')\n\n","repo_name":"spring01/deep_q_learning","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"30262771629","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'guessme.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 
'words.views.guessme', name=\"guessme\"),\n url(r'^getnextword/$', 'words.views.getnextword', name=\"getnextword\"),\n url(r'^addwords/$', 'words.views.add_words', name=\"add_words\"),\n\n )","repo_name":"techillies/guessme","sub_path":"bolradhabol/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"8669318526","text":"from multiprocessing import Process, freeze_support\r\nfrom config import instances\r\nfrom coin_seller import CoinSeller\r\n\r\n\r\ndef main() -> None:\r\n freeze_support()\r\n for instance in instances:\r\n coin_seller = CoinSeller(**instance)\r\n process = Process(target=coin_seller.run)\r\n process.start()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"nftscripts/bybit_ton","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"70633626879","text":"#!/usr/bin/env python\n\n\"\"\"\nSyntax: ./question2.py <ctab-file> <mapping-file> {0 | randomstring}\n0 for ignoring the line\nrandomstring will be appended if given\nIf there is no third argument 0 is default \n\"\"\"\nimport sys\n\nctab_file=open(sys.argv[1])\n\nmap_file=open(sys.argv[2])\no=open('ctab_uniprot_map.txt','w')\nctab_map={}\n\n#Create a dictionary with FB as key and AC as value\nfor line in map_file:\n #print line\n fields=line.rstrip(\"\\t\\n\").split(\"\\t\")\n # print fields[0]\n #print fields\n ctab_map[fields[0]]=fields[1]\n \n\ncount=0 \nfor line in ctab_file:\n if line.startswith(\"t_id\"):\n continue;\n fields=line.rstrip(\"\\t\\n\").split(\"\\t\")\n count+=1\n \n if fields[8] in ctab_map:\n newline=line+str((ctab_map[fields[8]]))\n \n \n else:\n # per the docstring, a missing third argument behaves like '0' (skip the line)\n if len(sys.argv) > 3 and sys.argv[3]!='0':\n newline=line+str(sys.argv[3])\n else:\n continue\n \n o.write(newline+\"\\n\")\n \n \nctab_file.close()\nmap_file.close()\no.close()\n \n \n \n\n","repo_name":"NandakumarRajasekaran/qbb2017-answers","sub_path":"day2-homework/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"10325185658","text":"\"\"\"\nDjango settings for georef project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\nimport mimetypes\n\nmimetypes.add_type(\"image/svg+xml\", \".svg\", True)\nmimetypes.add_type(\"image/svg+xml\", \".svgz\", True)\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '!)z=&5x$riu)-=%q)3$6i3^hs_re4x+zsw*l&3ph*_=bq!23%a'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = (\n 'bootstrap3',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 
'sorl.thumbnail',\n 'taggit',\n 'georef',\n 'djgeojson'\n)\n\nLEAFLET_CONFIG = {\n 'PLUGINS': {\n 'draw': {\n 'css': \"leaflet/draw/leaflet.draw.css\",\n 'js': \"leaflet/draw/leaflet.draw.js\",\n },\n }\n}\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'djangoProject.urls'\n\nWSGI_APPLICATION = 'djangoProject.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nimport dj_database_url\nDATABASES = {'default': dj_database_url.config()}\nPOSTGIS_VERSION = (2, 1, 1)\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'fi-fi'\nLOCALE_CODE = 'fi_FI'\n\nTIME_ZONE = 'Europe/Helsinki'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\n\nMEDIA_ROOT = \"C:/georef/media/\"\nMEDIA_URL = '/media/'\n\n\nALKUPERAISET = u'C:/georef/alkuperaiset'\nTAUSTA_MOSAIC = u'C:/georef/tausta_mosaic'\nGEOSERVER = {'host': \"http://localhost:8080/geoserver\",\n 'user': 'admin',\n 'password': 'geoserver',\n 'workspace': 'georef'}\n","repo_name":"LKajan/georef","sub_path":"djangoProject/djangoProject/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"18601032528","text":"from spack import *\n\n\nclass I7z(Package):\n \"\"\"A better i7 (and now i3, i5) reporting tool for Linux.\"\"\"\n\n homepage = \"https://code.google.com/archive/p/i7z/\"\n url = \"https://github.com/afontenot/i7z.git\"\n\n version('afontenot', git=\"https://github.com/afontenot/i7z.git\")\n version('epfl-scitas', git=\"https://github.com/epfl-scitas/i7z.git\", preferred=True)\n\n depends_on('ncurses')\n\n def install(self, spec, prefix):\n if spec.satisfies('^ncurses@6:'):\n filter_file(r'-lncurses', r'-lncurses -ltinfo', 'Makefile')\n make()\n make('prefix={0}'.format(prefix), 'install')\n","repo_name":"epfl-scitas/scitas-spack-packages","sub_path":"packages/i7z/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"12243967990","text":"from collections import Counter\n\nclass Solution:\n def longestSubstring(self, s: str, k: int) -> int:\n problem_letters=[]\n dic=Counter(s)\n valid=True\n for i in dic:\n if dic[i]<k:\n problem_letters.append(i)\n valid=False\n if valid:\n return len(s)\n for i in problem_letters:\n s=s.replace(i,\" \")\n splitted_str=s.split(\" \")\n maxi=0\n for st in splitted_str:\n maxi=max(maxi,self.longestSubstring(st,k))\n return maxi\n''' Time Complexity--O(n*n)\n Space Complexity--O(n)'''\n","repo_name":"Durgaprasad-kakarla/Leetcode","sub_path":"395. Longest Substring with At Least K Repeating Characters.py","file_name":"395. Longest Substring with At Least K Repeating Characters.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"33536297227","text":"# 94. 
Binary Tree Inorder Traversal\r\n# https://leetcode.com/problems/binary-tree-inorder-traversal/\r\n\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n#     def __init__(self, val=0, left=None, right=None):\r\n#         self.val = val\r\n#         self.left = left\r\n#         self.right = right\r\nclass Solution:\r\n    def inorderTraversal(self, root):\r\n        ans=[]\r\n        stack=[]\r\n        curr=root\r\n        while curr or stack:\r\n            if curr:\r\n                stack.append(curr)\r\n                curr=curr.left\r\n            else:\r\n                curr=stack.pop()\r\n                ans.append(curr.val)\r\n                curr=curr.right\r\n        return ans\r\n    \r\n    '''\r\n    def inorder(node):\r\n        if not node:\r\n            return []\r\n        return inorder(node.left)+[node.val]+inorder(node.right)\r\n    return inorder(root)\r\n    '''","repo_name":"Rushijaviya/September-LeetCoding-Challenge","sub_path":"08_Binary Tree Inorder Traversal.py","file_name":"08_Binary Tree Inorder Traversal.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"30339232152","text":"import pygame\n\n# Initialize pygame\npygame.init()\n\nSCREEN_WIDTH = 600\nSCREEN_HEIGHT = 300\n\n\ndisplay_surface = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))\npygame.display.set_caption(\"Discrete Movements\")\n\n# Set the game's values\nVELOCITY = 10\n\n# Load in images\ndragon_image = pygame.image.load(\"dragon_right.png\")\ndragon_rect = dragon_image.get_rect()\ndragon_rect.centerx = SCREEN_WIDTH//2\ndragon_rect.bottom = SCREEN_HEIGHT\n\n# Main Game Loop\nrunning = True\nwhile running:\n    for event in pygame.event.get():\n        \n        if event.type == pygame.QUIT:\n            running = False\n        \n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LEFT:\n                dragon_rect.x -= VELOCITY\n            if event.key == pygame.K_RIGHT:\n                dragon_rect.x += VELOCITY\n            if event.key == pygame.K_UP:\n                dragon_rect.y -= VELOCITY\n            if event.key == pygame.K_DOWN:\n                dragon_rect.y += VELOCITY\n    \n    # Fill the display surface to cover old images\n    display_surface.fill((0,0,0))\n    \n    # Blit (copy) the assets to the screen\n    display_surface.blit(dragon_image, dragon_rect)\n    \n    # Update the display\n    pygame.display.update()\n    \n# End the game \npygame.quit()","repo_name":"ioseluiz/basic-pygame","sub_path":"keyboard_movements.py","file_name":"keyboard_movements.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"24587912702","text":"'''\r\nThe Polygon patch allows us to draw polygons with different numbers of sides.\r\n\r\nHere is how to draw a square:\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef draw_square():\r\n    ax=plt.axes(xlim=(0,6),ylim=(0,6))\r\n    square=plt.Polygon([(1,1),(5,1),(5,5),(1,5)],closed=True)\r\n    ax.add_patch(square)\r\n    plt.show()\r\n\r\nif __name__=='__main__':\r\n    draw_square()\r\n\r\nThis snippet shows us how to create the circles and add them to the figure:\r\n\r\n\r\ny=1.5\r\nwhile y<5:\r\n    x=1.5\r\n    while x<5:\r\n        c=draw_circle(x,y)\r\n        ax.add_patch(c)\r\n\r\n        x+=1\r\n    y+=1\r\n'''\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef draw_square():\r\n    square=plt.Polygon([(1,1),(5,1),(5,5),(1,5)],closed=True)\r\n    return square\r\n\r\n\r\ndef draw_circle(x,y):\r\n    circle=plt.Circle((x,y),radius=0.5,fc='y')\r\n    return circle\r\n\r\nif __name__=='__main__':\r\n    ax=plt.gca()\r\n    s=draw_square()\r\n    ax.add_patch(s)\r\n    y=1.5\r\n    while y<5:\r\n        x=1.5\r\n        while x<5:\r\n            c=draw_circle(x,y)\r\n            ax.add_patch(c)\r\n            x+=1.0\r\n        y+=1.0\r\n    plt.axis('scaled')\r\n    
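# 'scaled' keeps both axes at the same scale, so the packed circles render round\r\n    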
plt.show()\r\n","repo_name":"zosopick/mathwithpython1","sub_path":"Excersises/Chapter 6/1. Packing Circles into a Square.py","file_name":"1. Packing Circles into a Square.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"74829092799","text":"\ndef right(t, n):\n for i in range(n):\n a = [t[3]] + t[:3] + t[4:]\n t = a[:]\n return t[:]\n\ndef left(t, n):\n for i in range(n):\n a = t[1:4] + [t[0]] + t[4:]\n t = a[:]\n return t[:]\n\ndef front(t, n):\n for i in range(n):\n a = t[:]\n a[0] = t[5]\n a[2] = t[4]\n a[4] = t[0]\n a[5] = t[2]\n t = a[:]\n return t[:]\n\ndef back(t, n):\n for i in range(n):\n a = t[:]\n a[0] = t[4]\n a[2] = t[5]\n a[4] = t[2]\n a[5] = t[0]\n t = a[:]\n return t[:]\n\nif __name__ == \"__main__\":\n while True:\n num = input(\"Enter the desired goal cell number: \")\n if not num.isdigit() or num == '':\n print('Incorrect value, try again')\n continue\n\n num = int(num)\n if num < 1:\n print('Incorrect value, try again')\n continue\n break\n a = [3, 1, 4, 6, 2, 5]\n duration = 1\n cur = 1\n while True:\n if cur + duration <= num:\n a = right(a, duration)\n cur += duration\n else:\n a = right(a, num - cur)\n break\n if cur + duration <= num:\n a = front(a, duration)\n cur += duration\n else:\n a = front(a, num - cur)\n break\n duration += 1\n if cur + duration <= num:\n a = left(a, duration)\n cur += duration\n else:\n a = left(a, num - cur)\n break\n if cur + duration <= num:\n a = back(a, duration)\n cur += duration\n else:\n a = back(a, num - cur)\n break\n duration += 1\n print(\"On cell %d, %d is at the top, %d at the front, and %d on the right.\" % \\\n (num, a[0], a[4], a[1]))\n","repo_name":"DARRENSKY/COMP9021","sub_path":"assignment/assignment_1/pivoting_die.py","file_name":"pivoting_die.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"97"} +{"seq_id":"39509608972","text":"from unittest import TestCase\n\nfrom liblab_sdk_djbabs.api_resources.mixins import PathBuilder\n\n\nclass TestPathBuilder(TestCase):\n def test_build_pagination(self):\n t = PathBuilder(base_url=\"http://test.com\", domain=\"movie\", version=\"v2\",\n pagination={\"page\": 2, \"limit\": 100}).build()\n\n TestCase.assertEqual(self, first=\"http://test.com/v2/movie?page=2&limit=100\", second=t[1])\n\n def test_build_sort(self):\n t = PathBuilder(base_url=\"http://test.com\", domain=\"movie\", version=\"v2\",\n sort={\"name\": \"asc\"}).build()\n\n TestCase.assertEqual(self, first=\"http://test.com/v2/movie?sort=name%3Aasc\", second=t[1])\n\n def test_build_filter(self):\n t = PathBuilder(base_url=\"http://test.com\", domain=\"movie\", version=\"v2\",\n params={\"name\": {\">\": 10}}).build()\n\n TestCase.assertEqual(self, first=\"http://test.com/v2/movie?name>10\", second=t[1])\n","repo_name":"okemechris/christian_okeme-SDK","sub_path":"tests/test_mixins.py","file_name":"test_mixins.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"25555930591","text":"import unittest\nimport datetime\nimport recurly\nfrom six import iterkeys\nfrom six.moves import filter\nfrom dateutil.relativedelta import relativedelta\nfrom recurly.errors import BadRequestError\n\nrecurly.API_KEY = 'blah'\n\nimport mocurly.core\nimport mocurly.endpoints\nimport mocurly.backend\n\nclass TestSubscriptions(unittest.TestCase):\n def 
setUp(self):\n self.mocurly_ = mocurly.core.mocurly()\n self.mocurly_.start()\n\n self.base_address_data = {\n 'address1': '123 Jackson St.',\n 'address2': 'Data City',\n 'state': 'CA',\n 'zip': '94105'\n }\n self.base_billing_info_data = {\n 'uuid': 'blah',\n 'first_name': 'Foo',\n 'last_name': 'Bar'\n }\n self.base_account_data = {\n 'uuid': 'blah',\n 'account_code': 'blah',\n 'email': 'foo@bar.com',\n 'first_name': 'Foo',\n 'last_name': 'Bar',\n 'address': self.base_address_data,\n 'hosted_login_token': 'abcd1234',\n 'created_at': '2014-08-11'\n }\n mocurly.backend.accounts_backend.add_object(self.base_account_data['uuid'], self.base_account_data)\n mocurly.backend.billing_info_backend.add_object(self.base_billing_info_data['uuid'], self.base_billing_info_data)\n\n self.base_plan_data = {\n 'plan_code': 'gold',\n 'name': 'Gold Plan',\n 'unit_amount_in_cents': recurly.Money(USD=1000, EUR=800)\n }\n self.base_backed_plan_data = self.base_plan_data.copy()\n self.base_backed_plan_data['uuid'] = self.base_plan_data['plan_code']\n self.base_backed_plan_data['unit_amount_in_cents'] = {u'USD': u'1000', u'EUR': u'800'}\n self.base_backed_plan_data['display_quantity'] = False\n self.base_backed_plan_data['trial_interval_length'] = 0\n self.base_backed_plan_data['plan_interval_unit'] = 'months'\n self.base_backed_plan_data['created_at'] = '2014-08-20'\n self.base_backed_plan_data['tax_exempt'] = False\n self.base_backed_plan_data['trial_interval_unit'] = 'months'\n self.base_backed_plan_data['plan_interval_length'] = 1\n\n self.base_subscription_data = {\n 'plan_code': 'gold',\n 'account': recurly.Account(account_code=self.base_account_data['account_code']),\n 'currency': 'USD'\n }\n\n self.base_add_on_backed_data = [\n {\n 'add_on_code': 'foo',\n 'name': 'Foo',\n 'plan': self.base_backed_plan_data['plan_code'],\n 'accounting_code': 'foo',\n 'unit_amount_in_cents': {'USD': 1000},\n 'created_at': '2014-08-20',\n },\n {\n 'add_on_code': 'bar',\n 'name': 'Bar',\n 'plan': self.base_backed_plan_data['plan_code'],\n 'accounting_code': 'bar',\n 'unit_amount_in_cents': {'USD': 80},\n 'created_at': '2014-08-20',\n }\n ]\n self.base_add_on_data = [\n {\n 'add_on_code': 'foo',\n 'unit_amount_in_cents': {'USD': 1000}\n },\n {\n 'add_on_code': 'bar',\n 'unit_amount_in_cents': {'USD': 80}\n }\n ]\n\n def tearDown(self):\n self.mocurly_.stop()\n\n def test_simple_plan_add_on_creation(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n\n self.assertEqual(len(mocurly.backend.plan_add_ons_backend.datastore), 0)\n\n # now create some addons\n plan = recurly.Plan.get(self.base_backed_plan_data['plan_code'])\n for add_on in self.base_add_on_data:\n add_on['name'] = add_on['add_on_code'].upper()\n add_on['unit_amount_in_cents'] = recurly.Money(**add_on['unit_amount_in_cents'])\n plan.create_add_on(recurly.AddOn(**add_on))\n\n self.assertEqual(len(mocurly.backend.plan_add_ons_backend.datastore), 2)\n foo_add_on_backed = mocurly.backend.plan_add_ons_backend.get_object(self.base_backed_plan_data['plan_code'] + '__foo')\n add_ons = filter(lambda add_on: add_on['add_on_code'] == 'foo', self.base_add_on_data)\n foo_add_on = next(add_ons)\n for k, v in foo_add_on.items():\n if k == 'unit_amount_in_cents':\n self.assertEqual(foo_add_on_backed[k], dict((curr, str(amt)) for curr, amt in v.currencies.items()))\n else:\n self.assertEqual(foo_add_on_backed[k], v)\n\n bar_add_on_backed = 
mocurly.backend.plan_add_ons_backend.get_object(self.base_backed_plan_data['plan_code'] + '__bar')\n add_ons = filter(lambda add_on: add_on['add_on_code'] == 'bar', self.base_add_on_data)\n bar_add_on = next(add_ons)\n for k, v in bar_add_on.items():\n if k == 'unit_amount_in_cents':\n self.assertEqual(bar_add_on_backed[k], dict((curr, str(amt)) for curr, amt in v.currencies.items()))\n else:\n self.assertEqual(bar_add_on_backed[k], v)\n\n # make sure foreign keys are linked properly\n self.assertEqual(len(plan.add_ons()), 2)\n\n def test_simple_plan_creation(self):\n self.assertEqual(len(mocurly.backend.plans_backend.datastore), 0)\n\n new_plan = recurly.Plan(**self.base_plan_data)\n new_plan.save()\n\n self.assertEqual(len(mocurly.backend.plans_backend.datastore), 1)\n new_plan_backed = mocurly.backend.plans_backend.get_object(new_plan.plan_code)\n for k, v in self.base_plan_data.items():\n if k == 'unit_amount_in_cents':\n self.assertEqual(new_plan_backed[k], dict((curr, str(amt)) for curr, amt in v.currencies.items()))\n else:\n self.assertEqual(new_plan_backed[k], v)\n\n def test_plan_deletion(self):\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n self.assertEqual(len(mocurly.backend.plans_backend.datastore), 1)\n recurly.Plan.get(self.base_backed_plan_data['plan_code']).delete()\n self.assertEqual(len(mocurly.backend.plans_backend.datastore), 0)\n\n def test_simple_subscription_creation(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n\n self.assertEqual(len(mocurly.backend.subscriptions_backend.datastore), 0)\n\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n\n self.assertEqual(len(mocurly.backend.subscriptions_backend.datastore), 1)\n\n # Make sure a new transaction and invoice was created with it\n invoice = new_subscription.invoice()\n self.assertEqual(\n invoice.line_items[0].start_date.date(),\n datetime.datetime.utcnow().date())\n self.assertEqual(\n invoice.line_items[0].end_date.date(),\n (datetime.datetime.utcnow() + relativedelta(months=1)).date())\n transactions = invoice.transactions\n self.assertEqual(len(transactions), 1)\n self.assertEqual(transactions[0].subscription().uuid, new_subscription.uuid)\n\n # Make sure we can reference the subscription from the account\n account = new_subscription.account()\n account_subscriptions = account.subscriptions()\n self.assertEqual(len(account_subscriptions), 1)\n self.assertEqual(account_subscriptions[0].uuid, new_subscription.uuid)\n\n def test_trial_subscription_creation(self):\n # add a sample plan to the plans backend\n self.base_backed_plan_data['trial_interval_length'] = 1\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n\n self.assertEqual(len(mocurly.backend.subscriptions_backend.datastore), 0)\n\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n\n self.assertEqual(len(mocurly.backend.subscriptions_backend.datastore), 1)\n\n # Make sure a new transaction and invoice was created with it\n invoice = new_subscription.invoice()\n self.assertEqual(\n invoice.line_items[0].start_date.date(),\n datetime.datetime.utcnow().date())\n self.assertEqual(\n invoice.line_items[0].end_date.date(),\n (datetime.datetime.utcnow() + relativedelta(months=1)).date())\n transactions = 
invoice.transactions\n self.assertEqual(len(transactions), 1)\n self.assertEqual(transactions[0].subscription().uuid, new_subscription.uuid)\n\n # Make sure we can reference the subscription from the account\n account = new_subscription.account()\n account_subscriptions = account.subscriptions()\n self.assertEqual(len(account_subscriptions), 1)\n self.assertEqual(account_subscriptions[0].uuid, new_subscription.uuid)\n self.assertEqual(account_subscriptions[0].state, 'active')\n\n def test_trial_override_subscription_creation(self):\n # add a sample plan to the plans backend\n self.base_backed_plan_data['trial_interval_length'] = 1\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n\n self.assertEqual(len(mocurly.backend.subscriptions_backend.datastore), 0)\n\n # Make the trial immediately expire\n self.base_subscription_data['trial_ends_at'] = datetime.datetime.utcnow()\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n\n self.assertEqual(len(mocurly.backend.subscriptions_backend.datastore), 1)\n\n # Make sure a new transaction and invoice was created with it\n invoice = new_subscription.invoice()\n # The invoice should not contain 0 (trial cost) but the actual cost of the subscription since the trial has expired immediately\n self.assertNotEqual(invoice.total_in_cents, 0)\n\n transactions = invoice.transactions\n self.assertEqual(len(transactions), 1)\n self.assertEqual(transactions[0].subscription().uuid, new_subscription.uuid)\n\n # Make sure we can reference the subscription from the account\n account = new_subscription.account()\n account_subscriptions = account.subscriptions()\n self.assertEqual(len(account_subscriptions), 1)\n self.assertEqual(account_subscriptions[0].uuid, new_subscription.uuid)\n self.assertEqual(account_subscriptions[0].state, 'active')\n\n def test_subscriptions_with_addons(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n # add some add ons to the backend\n for add_on in self.base_add_on_backed_data:\n uuid = mocurly.endpoints.PlansEndpoint().generate_plan_add_on_uuid(self.base_backed_plan_data['plan_code'], add_on['add_on_code'])\n mocurly.backend.plan_add_ons_backend.add_object(uuid, add_on)\n\n self.assertEqual(len(mocurly.backend.subscriptions_backend.datastore), 0)\n\n self.base_subscription_data['subscription_add_ons'] = [recurly.SubscriptionAddOn(add_on_code=addon['add_on_code'], quantity=1) for addon in self.base_add_on_data]\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n\n self.assertEqual(len(mocurly.backend.subscriptions_backend.datastore), 1)\n\n def test_subscription_filtering(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n # add multiple subscriptions in different states\n self.base_subscription_data['uuid'] = 'foo'\n self.base_subscription_data['account'] = self.base_account_data['account_code']\n self.base_subscription_data['state'] = 'active'\n mocurly.backend.subscriptions_backend.add_object('foo', self.base_subscription_data)\n self.base_subscription_data['uuid'] = 'bar'\n self.base_subscription_data['state'] = 'future'\n mocurly.backend.subscriptions_backend.add_object('bar', self.base_subscription_data)\n for i in range(5):\n 
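# pad the backend with several expired subscriptions beyond the active/future pair\n            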
self.base_subscription_data['uuid'] = str(i)\n self.base_subscription_data['state'] = 'expired'\n mocurly.backend.subscriptions_backend.add_object(str(i), self.base_subscription_data)\n\n account = recurly.Account.get(self.base_account_data['account_code'])\n active_subscriptions = account.subscriptions(state='active')\n self.assertEqual(len(active_subscriptions), 1)\n self.assertEqual(active_subscriptions[0].uuid, 'foo')\n\n future_subscriptions = account.subscriptions(state='future')\n self.assertEqual(len(future_subscriptions), 1)\n self.assertEqual(future_subscriptions[0].uuid, 'bar')\n\n live_subscriptions = account.subscriptions(state='live')\n self.assertEqual(len(live_subscriptions), 2)\n self.assertEqual(set(['foo', 'bar']), set([sub.uuid for sub in live_subscriptions]))\n\n expired_subscriptions = account.subscriptions(state='expired')\n self.assertEqual(len(expired_subscriptions), 5)\n self.assertEqual(set(range(5)), set([int(sub.uuid) for sub in expired_subscriptions]))\n\n def test_subscription_termination_full_refund(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n # add an active subscription\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n self.assertEqual(new_subscription.state, 'active')\n\n # Now terminate it with a full refund\n new_subscription.terminate(refund='full')\n \n self.assertEqual(new_subscription.state, 'expired')\n invoice = new_subscription.invoice()\n transactions = invoice.transactions\n self.assertEqual(len(transactions), 1)\n self.assertEqual(transactions[0].status, 'void')\n\n def test_subscription_termination_partial_refund(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n # add an active subscription\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n self.assertEqual(new_subscription.state, 'active')\n\n # modify start time so a partial refund will refund half the cost\n start = new_subscription.current_period_started_at\n end = new_subscription.current_period_ends_at\n new_start = start - (end-start)\n mocurly.backend.subscriptions_backend.update_object(new_subscription.uuid, {'current_period_started_at': new_start.isoformat()})\n\n # get the original transaction and invoice objects for use later\n self.assertEqual(len(mocurly.backend.transactions_backend.datastore), 1)\n self.assertEqual(len(mocurly.backend.invoices_backend.datastore), 1)\n transaction_keys = iterkeys(mocurly.backend.transactions_backend.datastore)\n invoice_keys = iterkeys(mocurly.backend.invoices_backend.datastore)\n original_transaction_id = next(transaction_keys)\n original_invoice_id = next(invoice_keys)\n\n # Now terminate it with a partial refund\n new_subscription.terminate(refund='partial')\n \n self.assertEqual(new_subscription.state, 'expired')\n self.assertEqual(len(mocurly.backend.transactions_backend.datastore), 2)\n self.assertEqual(len(mocurly.backend.invoices_backend.datastore), 2)\n for invoice_id, invoice in mocurly.backend.invoices_backend.datastore.items():\n if invoice_id == original_invoice_id:\n original_invoice = invoice\n else:\n refund_invoice = invoice\n # The following two lines reference the objects to make sure they exist\n original_invoice\n refund_invoice\n\n original_transaction_ids = original_invoice['transactions']\n 
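# the original invoice should still hold only the original, unvoided transaction\n        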
self.assertEqual(len(original_transaction_ids), 1)\n self.assertEqual(original_transaction_ids[0], original_transaction_id)\n original_transaction = mocurly.backend.transactions_backend.get_object(original_transaction_id)\n self.assertEqual(original_transaction['status'], 'success') # not voided\n\n refund_transaction_ids = refund_invoice['transactions']\n self.assertEqual(len(refund_transaction_ids), 1)\n self.assertNotEqual(refund_transaction_ids[0], original_transaction_id)\n refund_transaction = mocurly.backend.transactions_backend.get_object(refund_transaction_ids[0])\n self.assertEqual(refund_transaction['status'], 'success')\n self.assertEqual(refund_transaction['action'], 'refund')\n # 29 / 60, because today doesn't count\n self.assertEqual(refund_transaction['amount_in_cents'], -int(original_transaction['amount_in_cents'] * (29.0 / 60)))\n\n def test_subscription_cancel_reactivate(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n # add an active subscription\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n self.assertEqual(new_subscription.state, 'active')\n\n # trying to reactivate an active subscription should fail\n with self.assertRaises(BadRequestError):\n new_subscription.reactivate()\n\n # now cancel it and verify it was canceled\n new_subscription.cancel()\n self.assertEqual(new_subscription.state, 'canceled')\n\n # now reactivate it and verify it was reactivated\n new_subscription.reactivate()\n self.assertEqual(new_subscription.state, 'active')\n self.assertEqual(new_subscription.canceled_at, None)\n self.assertEqual(new_subscription.expires_at, None)\n\n def test_coupon_redemption_percent_discount(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n # add a sample coupon to the coupons backend\n base_coupon_data = {\n 'coupon_code': 'special',\n 'name': 'Special 10% off',\n 'discount_type': 'percent',\n 'discount_percent': 10\n }\n mocurly.backend.coupons_backend.add_object(base_coupon_data['coupon_code'], base_coupon_data)\n # ... 
and redeem the coupon\n base_redemption_data = {\n 'account_code': self.base_account_data['account_code'],\n 'currency': 'USD',\n 'coupon': base_coupon_data['coupon_code']\n }\n mocurly.backend.coupon_redemptions_backend.add_object(base_redemption_data['coupon'] + '__' + self.base_account_data['account_code'], base_redemption_data)\n\n # Now add an active subscription and verify the transactions are discounted properly\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n invoice = new_subscription.invoice()\n self.assertEqual(invoice.total_in_cents, 900)\n line_items = invoice.line_items\n self.assertEqual(len(line_items), 1)\n line_item = line_items[0]\n self.assertEqual(line_item.unit_amount_in_cents, 1000)\n self.assertEqual(line_item.discount_in_cents, 100)\n self.assertEqual(line_item.total_in_cents, 900)\n\n def test_coupon_redemption_direct_discount(self):\n # add a sample plan to the plans backend\n mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)\n # add a sample coupon to the coupons backend\n base_coupon_data = {\n 'coupon_code': 'special',\n 'name': 'Special $1.00 off',\n 'discount_type': 'dollars',\n 'discount_in_cents': 100\n }\n mocurly.backend.coupons_backend.add_object(base_coupon_data['coupon_code'], base_coupon_data)\n # ... and redeem the coupon\n base_redemption_data = {\n 'account_code': self.base_account_data['account_code'],\n 'currency': 'USD',\n 'coupon': base_coupon_data['coupon_code']\n }\n mocurly.backend.coupon_redemptions_backend.add_object(base_redemption_data['coupon'] + '__' + self.base_account_data['account_code'], base_redemption_data)\n\n # Now add an active subscription and verify the transactions are discounted properly\n new_subscription = recurly.Subscription(**self.base_subscription_data)\n new_subscription.save()\n invoice = new_subscription.invoice()\n self.assertEqual(invoice.total_in_cents, 900)\n line_items = invoice.line_items\n self.assertEqual(len(line_items), 1)\n line_item = line_items[0]\n self.assertEqual(line_item.unit_amount_in_cents, 1000)\n self.assertEqual(line_item.discount_in_cents, 100)\n self.assertEqual(line_item.total_in_cents, 900)\n","repo_name":"Captricity/mocurly","sub_path":"tests/test_subscriptions.py","file_name":"test_subscriptions.py","file_ext":"py","file_size_in_byte":21817,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"} +{"seq_id":"33618697286","text":"#!/bin/python\nimport re,csv,numpy\n\ndef main(): \n \n sourcefile = open('43-traing.csv') \n #sourcefile = open('43-test.csv') \n #sourcefile = open('43-traing-validation.csv') \n #sourcefile = open('43-validation.csv') \n lines = sourcefile.readlines() \n rows1 = len(lines)\n output_file = open('43-traing_10_pure_feature_grouped.csv','w')\n #output_file = open('43-test_10_pure_feature_grouped.csv','w')\n #output_file = open('43-traing-validation_10_pure_feature_grouped.csv','w')\n #output_file = open('43-validation_10_pure_feature_grouped.csv','w')\n\n line = lines[1].strip()\n items = line.split(',')\n item_count = len(items)\n feature_vector = [None]*(item_count-3)\n \n class_vector = []\n for j in range(2,item_count-1):\n feature_vector[j-2] = []\n\n for i in range(rows1):\n line = lines[i].strip()\n items = line.split(',')\n class_vector.append(items[-1])\n for j in range(2,item_count-1):\n feature_vector[j-2].append(items[j])\n\n def groupMerge(*args):\n column=len(args)\n data_point_number = 
len(args[0])\n        Merged_group = ['0']*data_point_number\n        for i in range(column):\n            for j in range(data_point_number):\n                if str(args[i][j])==str(1):\n                    Merged_group[j]=str(1)\n        return Merged_group\n\n    group3_9_12_13_14_26_28_33_34_42=groupMerge(feature_vector[2], feature_vector[8], feature_vector[11],feature_vector[12], \\\n        feature_vector[13],feature_vector[25], feature_vector[27], feature_vector[32],feature_vector[33], feature_vector[41])\n\n    output_file.write('feature 3_9_12_13_14_26_28_33_34_42_pure' + ',' + ','.join(group3_9_12_13_14_26_28_33_34_42[:]) +'\\n')\n\n    sourcefile.close()\n    output_file.close()\n\nif __name__ == \"__main__\": main()\n\n","repo_name":"bioinformatics-gao/CASE-BBB-prediction-code","sub_path":"grouping/step9e2_merge_pure_in_group.py","file_name":"step9e2_merge_pure_in_group.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"4283736142","text":"\n\nlookup = {}\nlookup = dict()\nlookup = {'age': 42, 'loc': 'Italy'}\nlookup = dict(age=42, loc='Italy')\n\nclass Wizard:\n    def __init__(self, name, level):\n        self.level = level\n        self.name = name\n\ngandalf = Wizard(\"Gandalf\", 42)\nprint(gandalf.__dict__)\n\nprint(lookup)\nprint(lookup['loc'])\n\nlookup['cat'] = 'Fun code demos'\n\nif 'cat' in lookup:\n    print(lookup['cat'])\n\nimport collections\n\nuser = collections.namedtuple('User', 'id, name, email')\nusers = [\n    user(1, 'user1', 'user1@yahoo.com'),\n    user(2, 'user2', 'user2@yahoo.com'),\n    user(3, 'user3', 'user3@yahoo.com'),\n    user(4, 'user4', 'user4@yahoo.com'),\n    user(5, 'user5', 'user5@yahoo.com')\n]\n\nlookup = dict()\nfor u in users:\n    lookup[u.id] = u\n\nprint(lookup[4])\n\n    # can change to [u.email] etc\n    # lookup[u.email] = u\n    # print(lookup['user4@yahoo.com'])\n\n","repo_name":"shiv0910/TalkPythonApps","sub_path":"09Real_estate/concept_dicts.py","file_name":"concept_dicts.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"8440299879","text":"#!/usr/bin/python\n\n#source: https://www.orangecoat.com/how-to/read-and-decode-data-from-your-mouse-using-this-pyusb-hack\n\nimport sys\nimport usb.core\nimport usb.util\n# vendor and product values (hexadecimal)\ndev = usb.core.find(idVendor=0x046d, idProduct=0xc077)\n# or, uncomment the next line to search for a different device instead\n#dev = usb.core.find(idVendor=0x45e, idProduct=0x77d)\n# first endpoint\ninterface = 0\nprint(dev)\nendpoint = dev[0][(0,0)][0]\n# if the OS kernel already claimed the device, which is most likely true\n# thanks to http://stackoverflow.com/questions/8218683/pyusb-cannot-set-configuration\nif dev.is_kernel_driver_active(interface) is True:\n    # tell the kernel to detach\n    dev.detach_kernel_driver(interface)\n    # claim the device\n    usb.util.claim_interface(dev, interface)\ncollected = 0\nattempts = 50\nwhile collected < attempts :\n    try:\n        data = dev.read(endpoint.bEndpointAddress,endpoint.wMaxPacketSize)\n        collected += 1\n        print(data)\n    except usb.core.USBError as e:\n        data = None\n        if e.args == ('Operation timed out',):\n            continue\n# release the device\nusb.util.release_interface(dev, interface)\n# reattach the device to the OS 
kernel\ndev.attach_kernel_driver(interface)","repo_name":"taxfromdk/obsbot_tiny_reversing","sub_path":"research/mouse/mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"} +{"seq_id":"20061925760","text":"import socket\nimport random\nimport time\n\nimport os\nimport os.path\n\nfrom enum import Enum\n\nfrom membership_list import MembershipList, Status\nfrom metadata_list import MetadataList, FileStatus\nfrom messages import create_message, MessageType\nfrom protocol import ProtocolBase, ProtocolType\n\nfrom utils import list_to_str, id_to_address, demo_print, debug_print\n\nclass ResponseStatus(Enum):\n    OKAY = \"okay\"\n    CONFLICT = \"write-read or write-write conflict\"\n    FILE_NOT_FOUND = \"file not found\"\n\nclass All2All(ProtocolBase):\n    @staticmethod\n    def get_type():\n        return ProtocolType.ALL2ALL\n\n    @staticmethod\n    def process_message(address, message_json, mem_list, meta_list, self_id, writing_local_fname, writing_sdfs_fname, reading_local_fname, reading_sdfs_fname):\n        message_type = message_json[\"type\"]\n\n        if message_type == MessageType.HEARTBEAT.value:\n            sender_id = message_json[\"sender_id\"]\n            seqnum = message_json[\"seqnum\"]\n            sender_host = message_json[\"sender_host\"]\n            sender_port = message_json[\"sender_port\"]\n\n            debug_print(\"received heartbeat from {}\".format(sender_id))\n\n            if mem_list.contains_node(sender_id):\n                # check that the node is alive so we do not revive a failed node\n                if mem_list.is_alive(sender_id):\n                    mem_list.update_state(sender_id, Status.ALIVE, seqnum, time.time())\n            else:\n                mem_list.add_node(sender_id, sender_host, sender_port)\n\n        elif message_type == MessageType.REPORT.value:\n            sender_id = message_json[\"sender_id\"]\n            file_list = message_json[\"file_list\"]\n            debug_print(f\"received file list at {sender_id}: {file_list}\")\n            with mem_list.lock:\n                debug_print(f'{sender_id in mem_list.nodes.keys()}, {mem_list.nodes[sender_id].status == Status.ALIVE}')\n                condition = sender_id in mem_list.nodes.keys() and mem_list.nodes[sender_id].status == Status.ALIVE\n                if condition:\n                    with meta_list.lock:\n                        meta_list.update_node_file_list(sender_id, file_list)\n                        debug_print(meta_list)\n\n        elif message_type == MessageType.LEAVE.value:\n            sender_id = message_json[\"sender_id\"]\n            mem_list.mark_left(sender_id)\n\n        # Master gets put request from client\n        elif message_type == MessageType.PUT.value:\n            debug_print('Receives PUT from', address)\n            sender_id = message_json[\"sender_id\"]\n            sender_host = message_json[\"sender_host\"]\n            sender_port = message_json[\"sender_port\"]\n            sdfs_fname = message_json[\"sdfs_fname\"]\n\n            # with mem_list.lock and meta_list.lock:\n            target_hosts, target_ports, response_status = All2All.calc_put_response(sdfs_fname, sender_id, mem_list, meta_list)\n            debug_print(target_hosts, target_ports, response_status.value)\n            # reply to PUT initiator with list of addr for storing the replicas\n            All2All.send_put_response(sender_host, sender_port, target_hosts, target_ports, response_status)\n\n            debug_print('Sent PUT_RES to', address)\n\n        # Client gets put response from master\n        elif message_type == MessageType.PUT_RES.value:\n            debug_print('Receives PUT_RES from', address)\n            target_hosts = message_json[\"target_hosts\"].split(' ')\n            target_ports = message_json[\"target_ports\"].split(' ')\n            response_status = message_json[\"response_status\"]\n\n            # the master sends the enum's string value, so compare against .value\n            if response_status == ResponseStatus.CONFLICT.value:\n                debug_print(\"PUT command failed: Write-read 
or write-write conflict. Wait and try again. (see @908)\")\n                return\n            if response_status == ResponseStatus.FILE_NOT_FOUND.value:\n                debug_print(\"PUT command failed: File does not exist.\")\n                return\n\n            if not target_hosts[0]:\n                debug_print(\"No one wants the file.\")\n                return\n            else:\n                debug_print(f'{target_hosts} wants file')\n\n            All2All.transfer_file(target_hosts, target_ports, writing_local_fname, writing_sdfs_fname)\n            All2All.send_put_success(address, self_id, writing_sdfs_fname)\n\n            debug_print('Sent PUT_SUCCESS to', address)\n\n        # Master gets put success from client\n        elif message_type == MessageType.PUT_SUCCESS.value:\n            debug_print('Receives PUT_SUCCESS from', address)\n            sender_id = message_json[\"sender_id\"]\n            sdfs_fname = message_json[\"sdfs_fname\"]\n            with meta_list.lock:\n                meta_list.files[sdfs_fname].status = FileStatus.IDLE\n                if sender_id in meta_list.files[sdfs_fname].writers:\n                    meta_list.files[sdfs_fname].writers.remove(sender_id)\n\n        elif message_type == MessageType.GET.value:\n            debug_print('Receives GET from', address)\n            sender_id = message_json[\"sender_id\"]\n            sender_host = message_json[\"sender_host\"]\n            sender_port = message_json[\"sender_port\"]\n            sdfs_fname = message_json[\"sdfs_fname\"]\n\n            debug_print(meta_list.files)\n            target_hosts, target_ports, response_status = All2All.calc_get_response(sdfs_fname, sender_id, mem_list, meta_list)\n            debug_print(target_hosts, target_ports, response_status.value)\n            # reply to GET initiator with list of addr holding the replicas\n            All2All.send_get_response(sender_host, sender_port, target_hosts, target_ports, response_status)\n\n            debug_print('Sent GET_RES to', address)\n\n        # Client gets get response from master\n        elif message_type == MessageType.GET_RES.value:\n            debug_print('Receives GET_RES from', address)\n            target_hosts = message_json[\"target_hosts\"].split(' ')\n            target_ports = message_json[\"target_ports\"].split(' ')\n            response_status = message_json[\"response_status\"]\n\n            if response_status == ResponseStatus.CONFLICT.value:\n                debug_print(\"GET command failed: Write-read conflict. Wait and try again. 
(see @908)\")\n                return\n            if response_status == ResponseStatus.FILE_NOT_FOUND.value:\n                debug_print(\"GET command failed: File does not exist.\")\n                return\n\n            if not target_hosts[0]:\n                debug_print(\"No one has the file or file not available.\")\n                return\n            else:\n                debug_print(f'{target_hosts} has file')\n\n            All2All.request_file(target_hosts, target_ports, reading_local_fname, reading_sdfs_fname)\n            All2All.send_get_success(address, self_id, reading_sdfs_fname)\n\n            debug_print('Sent REQ to', address)\n\n        elif message_type == MessageType.GET_SUCCESS.value:\n            debug_print('Receives GET_SUCCESS from', address)\n            sender_id = message_json[\"sender_id\"]\n            sdfs_fname = message_json[\"sdfs_fname\"]\n            with meta_list.lock:\n                meta_list.files[sdfs_fname].status = FileStatus.IDLE\n                if sender_id in meta_list.files[sdfs_fname].readers:\n                    meta_list.files[sdfs_fname].readers.remove(sender_id)\n\n        elif message_type == MessageType.REPLICATE.value:\n            sdfs_fname = message_json[\"sdfs_fname\"]\n            target_ports = message_json[\"target_ports\"]\n            target_hosts = message_json[\"target_hosts\"]\n\n            if not target_hosts[0]:\n                debug_print(\"No one wants the file.\")\n                return\n\n            debug_print(f'{target_hosts} wants file')\n            All2All.request_file(target_hosts, target_ports, f'sdfs/{sdfs_fname}', sdfs_fname, ignore_self=True)\n\n        elif message_type == MessageType.DELETE.value:\n            sdfs_fname = message_json[\"sdfs_fname\"]\n            for node_id in meta_list.files[sdfs_fname].ids:\n                All2All.send_delete_request(sdfs_fname, node_id)\n\n        elif message_type == MessageType.DELETE_REQ.value:\n            sdfs_fname = message_json[\"sdfs_fname\"]\n            os.remove(f'sdfs/{sdfs_fname}')\n\n        ## list all VM addresses containing sdfs_fname\n        elif message_type == MessageType.LS.value:\n            sdfs_fname = message_json[\"sdfs_fname\"]\n            sender_id = message_json[\"sender_id\"]\n            client_host, client_port = id_to_address(sender_id)\n            ls_result = \"\"\n            if sdfs_fname in meta_list.files:\n                for node_id in meta_list.files[sdfs_fname].ids:\n                    ls_result += f'{node_id}\\n'\n            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n                address = (client_host, client_port)\n                message = create_message(\n                    MessageType.LS_RES,\n                    sdfs_fname=sdfs_fname,\n                    ls_result=ls_result\n                )\n                sock.sendto(message, address)\n\n        elif message_type == MessageType.LS_RES.value:\n            sdfs_fname = message_json[\"sdfs_fname\"]\n            ls_result = message_json[\"ls_result\"]\n            demo_print(\"LS of {}\".format(sdfs_fname))\n            demo_print(ls_result)\n\n    # Let master know this client wants the list of nodes with sdfs_fname\n    @staticmethod\n    def send_get(sdfs_fname, sender_id, sender_host, sender_port, server_host, server_port, fail_rate=0.0):\n        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n            address = (server_host, server_port)\n            message = create_message(\n                MessageType.GET,\n                sdfs_fname=sdfs_fname,\n                sender_id=sender_id,\n                sender_port=sender_port,\n                sender_host=sender_host,\n            )\n\n            # send message unless dropped, according to fail_rate\n            if random.random() >= fail_rate:\n                sock.sendto(message, address)\n\n    @staticmethod\n    def calc_get_response(sdfs_fname, sender_id, mem_list, meta_list):\n        # return list of target hosts and corresponding ports\n        target_hosts = []\n        target_ports = []\n\n        # get alive nodes in mem_list\n        alive_nodes = {}\n        with mem_list.lock:\n            for id, row in mem_list.nodes.items():\n                if row.status == Status.ALIVE:\n                    alive_nodes[id] = row.to_dict()\n\n        # mark the file as reading\n        with meta_list.lock:\n            # if sdfs_fname given matches no file in SDFS\n            if sdfs_fname not in meta_list.files.keys():\n                return 
target_hosts, target_ports, ResponseStatus.FILE_NOT_FOUND\n\n            if meta_list.files[sdfs_fname].status == FileStatus.WRITING:\n                return target_hosts, target_ports, ResponseStatus.CONFLICT\n\n            meta_list.files[sdfs_fname].status = FileStatus.READING\n            meta_list.files[sdfs_fname].readers.append(sender_id)\n\n            file_info = meta_list.files[sdfs_fname].to_dict()\n            # update alive node\n            for id, alive_node in alive_nodes.items():\n                if id in file_info[\"ids\"]:\n                    target_hosts.append(alive_node[\"host\"])\n                    target_ports.append(alive_node[\"port\"])\n\n        return target_hosts, target_ports, ResponseStatus.OKAY\n\n    # Master replies\n    @staticmethod\n    def send_get_response(sender_host, sender_port, target_hosts, target_ports, response_status, fail_rate=0.0):\n        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n            address = (sender_host, sender_port)\n            message = create_message(\n                MessageType.GET_RES,\n                target_hosts=list_to_str(target_hosts),\n                target_ports=list_to_str(target_ports),\n                response_status=response_status.value\n            )\n\n            # send message unless dropped, according to fail_rate\n            if random.random() >= fail_rate:\n                sock.sendto(message, address)\n\n    @staticmethod\n    def send_get_success(address, sender_id, sdfs_fname, fail_rate=0.0):\n        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n            message = create_message(\n                MessageType.GET_SUCCESS,\n                sdfs_fname=sdfs_fname,\n                sender_id=sender_id\n            )\n\n            # send message unless dropped, according to fail_rate\n            if random.random() >= fail_rate:\n                sock.sendto(message, address)\n\n    # get a file from another place, whether a client receives a GET_RES or REPLICATE\n    @staticmethod\n    def request_file(target_hosts, target_ports, local_fname, sdfs_fname, ignore_self=False, fail_rate=0.0):\n        start_time = time.time()\n        flag = False\n        for i in range(len(target_hosts)):\n            if not flag:\n                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n                    host = target_hosts[i]\n                    if host == socket.gethostname() and ignore_self:\n                        # skip ourselves but keep trying the remaining hosts\n                        continue\n\n                    port = int(target_ports[i]) + 11\n                    debug_print('Trying to connect to', (host, port))\n                    try:\n                        sock.connect((host, port))\n                    except:\n                        sock.close()\n                        continue\n                    sock.send(f'{sdfs_fname}'.encode())\n                    bytes_expected = int(sock.recv(1024).decode())\n\n                    # receiving the file over TCP in 1 KB chunks\n                    with open(local_fname, 'wb') as f:\n                        debug_print(f'Receiving {sdfs_fname} of size {bytes_expected}')\n                        bytes_received = 0\n                        while bytes_received < bytes_expected:\n                            data = sock.recv(1024)\n                            f.write(data)\n                            bytes_received += len(data)\n                            debug_print(f'Receiving {sdfs_fname} of size {bytes_expected} {bytes_received} / {bytes_expected} B')\n\n                    sock.close()\n                    flag = True\n        end_time = time.time()\n        demo_print(f'get time for {local_fname}: {end_time - start_time}')\n\n    # Let master know this client wants a list of nodes with sdfs_fname, or places to store replicas\n    @staticmethod\n    def send_put(sdfs_fname, sender_id, sender_host, sender_port, server_host, server_port, fail_rate=0.0):\n        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n            address = (server_host, server_port)\n            message = create_message(\n                MessageType.PUT,\n                sdfs_fname=sdfs_fname,\n                sender_id=sender_id,\n                sender_port=sender_port,\n                sender_host=sender_host,\n            )\n\n            # send message unless dropped, according to fail_rate\n            if random.random() >= fail_rate:\n                sock.sendto(message, address)\n\n    # Master calculates a list of nodes\n    @staticmethod\n    def calc_put_response(sdfs_fname, 
sender_id, mem_list, meta_list):\n # return list of target hosts and corresponding ports\n target_hosts = []\n target_ports = []\n\n # get alive nodes in mem_list\n alive_nodes = {}\n with mem_list.lock:\n for id, row in mem_list.nodes.items():\n if row.status == Status.ALIVE:\n alive_nodes[id] = row.to_dict()\n\n with meta_list.lock:\n # if sdfs_fname given matches any file in SDFS\n if sdfs_fname in meta_list.files.keys():\n if meta_list.files[sdfs_fname].status == FileStatus.IDLE:\n # mark the file as writing\n meta_list.files[sdfs_fname].status = FileStatus.WRITING\n meta_list.files[sdfs_fname].writers.append(sender_id)\n file_info = meta_list.files[sdfs_fname].to_dict()\n # update alive node with file\n for id, alive_node in alive_nodes.items():\n if id in file_info[\"ids\"]:\n target_hosts.append(alive_node[\"host\"])\n target_ports.append(alive_node[\"port\"])\n else:\n return target_hosts, target_ports, ResponseStatus.CONFLICT\n # new file\n else:\n # select 4 machines randomly\n alive_nodes = random.sample(alive_nodes.items(), min(4, len(alive_nodes)))\n for id, alive_node in alive_nodes:\n target_hosts.append(alive_node[\"host\"])\n target_ports.append(alive_node[\"port\"])\n \n return target_hosts, target_ports, ResponseStatus.OKAY\n\n # reply to PUT initiator with list of addr for storing the replicas\n @staticmethod\n def send_put_response(sender_host, sender_port, target_hosts, target_ports, response_status, fail_rate=0.0):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n message = create_message(\n MessageType.PUT_RES,\n target_hosts=list_to_str(target_hosts),\n target_ports=list_to_str(target_ports),\n response_status=response_status.value\n )\n\n # send message unless dropped, according to fail_rate\n if random.random() >= fail_rate:\n sock.sendto(message, (sender_host, sender_port))\n\n # send local file to local/remote sdfs folder\n @staticmethod\n def transfer_file(target_hosts, target_ports, writing_local_fname, writing_sdfs_fname):\n start_time = time.time()\n for i in range(len(target_hosts)):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n host = target_hosts[i]\n # if host == socket.gethostname():\n # continue\n\n port = int(target_ports[i]) + 10\n debug_print('Trying to connect to', (host, port))\n sock.connect((host, port))\n\n # sending file over TCP with socket.sendall()\n with open(writing_local_fname, 'rb') as f:\n # send file information\n debug_print(f'Sending {writing_sdfs_fname} of size {os.path.getsize(writing_local_fname)}')\n data = writing_sdfs_fname + ' ' + str(os.path.getsize(writing_local_fname))\n sock.send(data.encode())\n time.sleep(0.01)\n while True:\n bytes_read = f.read(1024)\n if not bytes_read:\n # file transmitting is done\n break\n sock.sendall(bytes_read)\n\n sock.close()\n end_time = time.time()\n demo_print(f'put time for size {os.path.getsize(writing_local_fname)}: {end_time - start_time}')\n\n # reply to master after successfully transferring file\n @staticmethod\n def send_put_success(address, sender_id, sdfs_fname, fail_rate=0.0):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n message = create_message(\n MessageType.PUT_SUCCESS,\n sdfs_fname=sdfs_fname,\n sender_id=sender_id\n )\n\n # send message unless dropped, according to fail_rate\n if random.random() >= fail_rate:\n sock.sendto(message, address)\n\n # reply to master after successfully transferring file\n @staticmethod\n def send_replicate(sdfs_fname, host, port, target_ids, fail_rate=0.0):\n with 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n target_hosts = [id_to_address(id)[0] for id in target_ids]\n target_ports = [id_to_address(id)[1] for id in target_ids]\n message = create_message(\n MessageType.REPLICATE,\n sdfs_fname=sdfs_fname,\n target_hosts=target_hosts,\n target_ports=target_ports\n )\n\n # send message unless dropped, according to fail_rate\n if random.random() >= fail_rate:\n sock.sendto(message, (host, port))\n\n # send_message() is ONLY for HEARTBEAT\n @staticmethod\n def send_message(sender_id, mem_list, sock, sender_host, sender_port, seqnum, fail_rate=0.0):\n # debug_print(\"sending heartbeat\")\n for node_id, row in mem_list:\n # only send a heartbeat to other, alive nodes\n if node_id == sender_id or not mem_list.is_alive(node_id):\n continue\n\n server_addr = (row.host, row.port)\n message = create_message(\n MessageType.HEARTBEAT,\n sender_id=sender_id,\n seqnum=seqnum,\n sender_host=sender_host,\n sender_port=sender_port,\n )\n\n # send message unless dropped, according to fail_rate\n if random.random() >= fail_rate:\n sock.sendto(message, server_addr)\n\n # send my list of files to the master node\n @staticmethod\n def send_report(sender_id, sender_host, sender_port, master_id, master_host, master_port, sock, report_num):\n # NOTE darci: master needs to check its own list at some point and\n # we need to make sure we actually delete files or we could keep track \n # of files not with the sdfs directory, but with a list in-memory\n if sender_id == master_id:\n return\n\n file_list = [f for f in os.listdir('sdfs') if os.path.isfile(os.path.join('sdfs', f))]\n \n server_addr = (master_host, master_port)\n message = create_message(\n MessageType.REPORT,\n sender_id=sender_id,\n report_num=report_num,\n sender_host=sender_host,\n sender_port=sender_port,\n file_list=file_list,\n )\n\n sock.sendto(message, server_addr)\n\n @staticmethod\n def send_message_interval(failure_detection_time, dissemination_time) -> float:\n # Send to all other nodes within half of dissemination_time\n return dissemination_time / 2\n\n @staticmethod\n def leave_group(sender_id, mem_list):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n message = create_message(MessageType.LEAVE, sender_id=sender_id)\n for node_id, row in mem_list:\n # only send a leave message to other, alive nodes\n if node_id == sender_id or not mem_list.is_alive(node_id):\n continue\n\n server_addr = (row.host, row.port)\n sock.sendto(message, server_addr)\n\n # clear the membership list. 
allows rejoining without restarting the program.\n mem_list.clear()\n\n @staticmethod\n def timeout_interval(failure_detection_time, dissemination_time) -> float:\n return failure_detection_time\n\n @staticmethod\n def send_delete(sdfs_fname, master_host, master_port, fail_rate=0.0):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n message = create_message(\n MessageType.DELETE,\n sdfs_fname=sdfs_fname\n )\n\n # send message unless dropped, according to fail_rate\n if random.random() >= fail_rate:\n sock.sendto(message, (master_host, master_port))\n\n @staticmethod\n def send_delete_request(sdfs_fname, node_id, fail_rate=0.0):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n message = create_message(\n MessageType.DELETE_REQ,\n sdfs_fname=sdfs_fname\n )\n\n # send message unless dropped, according to fail_rate\n if random.random() >= fail_rate:\n sock.sendto(message, id_to_address(node_id))\n\n @staticmethod\n def send_ls_request(sdfs_fname, sender_id, master_host, master_port):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n message = create_message(\n MessageType.LS,\n sdfs_fname=sdfs_fname,\n sender_id=sender_id\n )\n\n sock.sendto(message, (master_host, master_port))\n ","repo_name":"ishaandatta/All_Projects","sub_path":"Simplified Distributed File System/all2all.py","file_name":"all2all.py","file_ext":"py","file_size_in_byte":24503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"21682890167","text":"import numpy as np\nimport data_analysis\nimport control_flow\nimport time\n\ntick = time.time()\n\ndata_dir = 'raw_data'\nplot_dir = 'plots'\n#name = 'bernoulli_ei' # change later for generality\nname, n, T_ms, R, q = control_flow.plotting_args()\nm = int(n/4)\n\n# raw spiking data\nbin_step = 1\ntime_bins = np.arange(0,T_ms+bin_step,bin_step)\nf_exc = np.zeros([int(T_ms/bin_step)],dtype = int)\nf_inh = np.zeros([int(T_ms/bin_step)], dtype = int)\n\nbig_bin = 20 # time bins used for pairwise comparisons\ntime_bins_big = np.arange(0,T_ms+big_bin,big_bin)\nexc_coeff_all = []\ninh_coeff_all = []\nexc_fr_all = []\ninh_fr_all = []\n\nfor i in np.arange(R): \n data_exc_spike = np.load('%s/%s_raw_exc_spikes_q=%s_R=%s.npy'%(data_dir,name,q,i)).T\n data_inh_spike = np.load('%s/%s_raw_inh_spikes_q=%s_R=%s.npy'%(data_dir,name,q,i)).T\n ts_s_exc = data_exc_spike[0]\n send_exc = data_exc_spike[1]\n ts_s_inh = data_inh_spike[0]\n send_inh = data_inh_spike[1]\n f_exc += np.histogram(ts_s_exc,bins=time_bins)[0]\n f_inh += np.histogram(ts_s_inh,bins=time_bins)[0]\n\n exc_st = data_analysis.st_extract(send_exc,ts_s_exc,n,0)\n inh_st = data_analysis.st_extract(send_inh,ts_s_inh,m,n)\n _, exc_coeff = data_analysis.pair_correlate(exc_st,time_bins_big)\n _, inh_coeff = data_analysis.pair_correlate(inh_st,time_bins_big)\n exc_coeff_all += list(exc_coeff)\n inh_coeff_all += list(inh_coeff)\n # isi intervals\n exc_isi = data_analysis.isi_extract(send_exc,ts_s_exc,n,0)\n inh_isi = data_analysis.isi_extract(send_inh,ts_s_inh,m,n)\n exc_fr_mean = [1000/np.mean(xi) for xi in exc_isi]\n inh_fr_mean = [1000/np.mean(xi) for xi in inh_isi]\n exc_fr_all += list(exc_fr_mean)\n inh_fr_all += list(inh_fr_mean)\n\n# fr hists\ndata_analysis.fr_hist_plot('%s/%s_q=%s_epop'%(plot_dir,name,q),q,exc_fr_all)\ndata_analysis.fr_hist_plot('%s/%s_q=%s_ipop'%(plot_dir,name,q),q,inh_fr_all)\n\n# spike 
psds\ndata_analysis.spike_psd_plot('%s/%s_q=%s_epop'%(plot_dir,name,q),time_bins[1:],f_exc)\ndata_analysis.spike_psd_plot('%s/%s_q=%s_ipop'%(plot_dir,name,q),time_bins[1:],f_inh)\n\n# spike pairwise correlations\ndata_analysis.coefficient_histogram('%s/%s_q=%s_epop'%(plot_dir,name,q),q,exc_coeff_all)\ndata_analysis.coefficient_histogram('%s/%s_q=%s_ipop'%(plot_dir,name,q),q,inh_coeff_all)\n\n# voltage means and variances\ndata_mom = np.loadtxt('%s/%s_vmom_epop_q=%s.txt'%(data_dir,name,q)).T\ndata_analysis.voltage_time_plots('%s/%s_q=%s_epop'%(plot_dir,name,q),data_mom[1],data_mom[2],data_mom[0])\ndata_mom = np.loadtxt('%s/%s_vmom_ipop_q=%s.txt'%(data_dir,name,q)).T\ndata_analysis.voltage_time_plots('%s/%s_q=%s_ipop'%(plot_dir,name,q),data_mom[1],data_mom[2],data_mom[0])\n\ndata_analysis.double_raster('%s/%s_q=%s'%(plot_dir,name,q),ts_s_exc,send_exc,ts_s_inh,send_inh)\n\ntock = time.time()\nprint(tock-tick)","repo_name":"rpgowers/ocnc-network-motif","sub_path":"data_plotting.py","file_name":"data_plotting.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"4385071171","text":"from flask import Flask, request, jsonify\nimport base64\nimport cv2\nfrom matplotlib import pyplot as plt\nimport io\nfrom imageio import imread\nimport numpy as np\nfrom PIL import Image, ImageFile\nimport requests\nimport json\nImageFile.LOAD_TRUNCATED_IMAGES = True\napp = Flask(__name__)\nnarray = np.array([])\n@app.route('/both', methods = [\"POST\"])\ndef both():\n    d={}\n    similarity = \"NO\"\n    request_data = request.data\n    request_data = json.loads(request_data.decode('utf-8'))\n    image_1 = request_data['image1']\n    print(type(image_1))\n    decoded_image1= base64.b64decode(image_1)\n    i1 = Image.open(io.BytesIO(decoded_image1))\n    #i1.show()\n    img1 = np.array(i1)\n    print(\"IMAGE 1 IS: \")\n    print(img1)\n    gray_image1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n    size = gray_image1.size\n    hist1= cv2.calcHist([gray_image1], [0],None, [256], [0, 256])\n    image_2 = request_data['image2']\n    decoded_image2= base64.b64decode(image_2)\n    i2 = Image.open(io.BytesIO(decoded_image2))\n    #i2.show()\n    img2 = np.array(i2)\n    print('IMAGE 2 IS : ')\n    print(img2)\n    gray_image2= cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\n    hist2 = cv2.calcHist([gray_image2],[0],None, [256], [0, 256])\n    c=0\n    i = 0\n    while i<len(hist1) and i<len(hist2):\n        c += (hist1[i]-hist2[i])**2\n        i+=1\n    c= c**(1/2)\n    c = (c*100)/size\n    print('C IS: ')\n    print(c)\n    if(c<=0.09):\n        similarity = \"YES\"\n    else:\n        similarity = \"NO\"\n    print(\"SIMILARITY IS:\")\n    print(similarity)\n    d['output']=similarity\n    return d\n    # if(image_1!=decoded_image1 and image_2 != decoded_image2):\n    #     d['output']= \"success\"\n    # else :\n    #     d['output']= \"not\"\n    # return d\n# @app.route('/imageone',methods = [\"GET\"])\n# def imageone():\n#     d={}\n#     image1base = str(request.args['image1'])\n#     image2base = str(request.args['image2'])\n#     decodedImage1=base64.b64decode((image1base))\n#     img1= imread(io.BytesIO(decodedImage1))\n#     #narray= np.append(img) \n#     cv2_img = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n#     d['output']=\"success\"\n#     return d\n# @app.route('/imagetwo',methods = [\"GET\"])\n# def imagetwo():\n#     d={}\n#     image2base = str(request.args['image2'])\n#     decodedImage2=base64.b64decode((image2base))\n#     img2= imread(io.BytesIO(decodedImage2))\n#     img22 = np.asarray(img2)\n#     # narray = np.append(img22)\n#     # print(\"size of narray is :\")\n#     # print(narray.size) \n#     cv2_img2 = cv2.cvtColor(img22, 
cv2.COLOR_BGR2GRAY)\n# d['output']=\"success\"\n# return d\n\n\nif __name__ ==\"__main__\":\n app.run(debug = True)\n\n\n# def from_base64(base64_data):\n# nparr = np.fromstring(base64_data.decode('base64'), np.uint8)\n# return cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n\n\n\n\n# b64_bytes = base64.b64encode(data)\n# b64_string = b64_bytes.decode()\n\n# # reconstruct image as an numpy array\n# img = imread(io.BytesIO(base64.b64decode(b64_string)))\n\n# # show image\n# plt.figure()\n# plt.imshow(img, cmap=\"gray\")\n\n# # finally convert RGB image to BGR for opencv\n# # and save result\n# cv2_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n# cv2.imwrite(\"reconstructed.jpg\", cv2_img)\n# plt.show()\n\n\n\n \n\n # img = Image.open(io.BytesIO(base64.decodebytes(bytes(image_1, \"utf-8\"))))\n # print(type(img))\n # img.save('my-image.jpg')\n # print(\"IMAGE1 IS :\")","repo_name":"udaykirran12/Free_hand_password_authentication","sub_path":"api/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"8176800982","text":"\"\"\"AnsibleRunner workflow class.\"\"\"\n\nimport logging\n\nimport click\nfrom .. import config\nfrom ..utilities.state import TypeState\nfrom . import process\n\n\nclass AnsibleRunner:\n \"\"\"AnsibleRunner workflow class.\n\n :param state: The loaded runtime state object object.\n :param debug: Enable or disable logs.\n \"\"\"\n\n def __init__(self, state: TypeState, debug: bool = False):\n self.log = logging.getLogger(config.LOGGER_NAME)\n self.debug = debug\n self.state = state\n\n def start(self) -> None:\n \"\"\"Start the Ansible provisioning workflow.\"\"\"\n\n galaxy_roles_command = self._construct_galaxy_roles_command()\n galaxy_col_command = self._construct_galaxy_col_command()\n playbook_command = self._construct_ansible_playbook_command()\n\n self._do_install_galaxy_roles(galaxy_roles_command)\n self._do_install_galaxy_col(galaxy_col_command)\n self._do_ansible_playbook(playbook_command)\n\n def _construct_galaxy_roles_command(self) -> str:\n\n requirements_file = self.state['galaxy_requirements_file']\n role_path = self.state['roles_path'][0]\n self.log.debug(\n \"AnsibleRunner: Reading Profile requirements from: %s.\",\n requirements_file,\n )\n command = (\n f\"ansible-galaxy role install -r {requirements_file}\"\n f\" -p {role_path}\"\n )\n return command\n\n def _construct_galaxy_col_command(self) -> str:\n\n requirements_file = self.state['galaxy_requirements_file']\n col_path = self.state['collections_path'][0]\n self.log.debug(\n \"AnsibleRunner: Reading Profile requirements from: %s.\",\n requirements_file,\n )\n command = (\n f\"ansible-galaxy collection install -r {requirements_file}\"\n f\" -p {col_path}\"\n )\n return command\n\n def _construct_ansible_playbook_command(self) -> str:\n\n self.log.debug(\"AnsibleRunner: Invoking Ansible.\")\n command = (\n f\"ansible-playbook {self.state['playbook']}\"\n f\" -i {self.state['inventory']}\"\n \" -e \"\n \"\\\"ansible_become_password=\"\n \"'{{ lookup('env', 'ANSIBLE_BECOME_PASSWORD') }}'\\\"\"\n )\n if self.debug:\n command += \" -vvvv\"\n return command\n\n def _do_install_galaxy_roles(self, galaxy_command: str) -> None:\n controller = process.AnsibleProcess(\n config.ANSIBLE_LIBRARY_GALAXY_MODULE,\n config.ANSIBLE_LIBRARY_GALAXY_CLASS,\n self.state,\n )\n\n click.echo(config.ANSIBLE_ROLES_MESSAGE)\n controller.spawn(galaxy_command)\n self.log.debug(\n \"AnsibleRunner: Profile Ansible 
Galaxy roles have been \"\n \"installed to: %s.\",\n self.state['roles_path'][0],\n )\n\n def _do_install_galaxy_col(self, galaxy_command: str) -> None:\n controller = process.AnsibleProcess(\n config.ANSIBLE_LIBRARY_GALAXY_MODULE,\n config.ANSIBLE_LIBRARY_GALAXY_CLASS,\n self.state,\n )\n\n click.echo(config.ANSIBLE_COLLECTIONS_MESSAGE)\n controller.spawn(galaxy_command)\n self.log.debug(\n \"AnsibleRunner: Profile Ansible Galaxy collections have been \"\n \"installed to: %s.\",\n self.state['collections_path'][0],\n )\n\n def _do_ansible_playbook(self, ansible_command: str) -> None:\n controller = process.AnsibleProcess(\n config.ANSIBLE_LIBRARY_PLAYBOOK_MODULE,\n config.ANSIBLE_LIBRARY_PLAYBOOK_CLASS,\n self.state,\n )\n\n click.echo(config.ANSIBLE_INVOKE_MESSAGE)\n controller.spawn(ansible_command)\n self.log.debug(\"AnsibleRunner: Ansible Playbook has finished.\",)\n","repo_name":"osx-provisioner/mac_maker","sub_path":"mac_maker/ansible_controller/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"96"} +{"seq_id":"30673324824","text":"import math\nimport random\nimport sys\nimport pygame\nimport time\n\nclass Game:\n\tdef __init__(self):\n\t\tself.points = 0\n\t\tself.apple = self.apple_position = [30 * random.randint(0, 19), 30 * random.randint(0, 19)] \n\t\tself.apple_rect = pygame.Rect(self.apple_position[0], self.apple_position[1], 30, 30)\n\n\tdef reset_apple(self):\n\t\tself.apple_position = [30 * random.randint(0, 19), 30 * random.randint(0, 19)] \n\t\tself.apple_rect.update(self.apple_position[0], self.apple_position[1], 30, 30)\n\nclass Snake:\n\tdef __init__(self):\n\t\tself.length = 1\n\t\tself.size = 30\n\t\tself.direction = 0\n\t\tself.body = [pygame.Rect(300, 300, self.size, self.size)]\n\t\tself.velocity = [1, 0]\n\n\tdef update_velocity(self):\n\t\tif (int(self.body[0].left) % int(self.size)) + (int(self.body[0].top) % int(self.size)) == 0:\n\t\t\tif self.direction == 0:\n\t\t\t\tself.velocity = [1, 0]\n\t\t\telif self.direction == 1:\n\t\t\t\tself.velocity = [-1, 0]\n\t\t\telif self.direction == 2:\n\t\t\t\tself.velocity = [0, -1]\n\t\t\telif self.direction == 3:\n\t\t\t\tself.velocity = [0, 1]\n\t\t\t\n\t\t\treturn True\n\t\t\n\t\treturn False\n\t\n\tdef move(self):\n\t\tnum_segments = len(self.body)\n\t\tfor i in range(num_segments - 1, 0, -1):\t\n\t\t\tif self.body[i].left % self.size == 0 and self.body[i].top % self.size != 0:\n\t\t\t\tif self.body[i - 1].top > self.body[i].top:\t\n\t\t\t\t\tself.body[i].move_ip(0, 1)\n\t\t\t\telif self.body[i - 1].top < self.body[i].top: \n\t\t\t\t\tself.body[i].move_ip(0, -1)\t\n\t\t\telif self.body[i].left % self.size != 0 and self.body[i].top % self.size == 0:\n\t\t\t\tif self.body[i - 1].left > self.body[i].left:\n\t\t\t\t\tself.body[i].move_ip(1, 0)\n\t\t\t\telif self.body[i - 1].left < self.body[i].left: \n\t\t\t\t\tself.body[i].move_ip(-1, 0)\n\t\t\telse:\n\t\t\t\tif self.body[i - 1].left > self.body[i].left:\n\t\t\t\t\tself.body[i].move_ip(1, 0)\n\t\t\t\telif self.body[i - 1].left < self.body[i].left: \n\t\t\t\t\tself.body[i].move_ip(-1, 0)\n\t\t\t\telif self.body[i - 1].top > self.body[i].top:\t\n\t\t\t\t\tself.body[i].move_ip(0, 1)\n\t\t\t\telif self.body[i - 1].top < self.body[i].top: \n\t\t\t\t\tself.body[i].move_ip(0, -1)\t\n\t\t\n\t\tself.body[0].move_ip(self.velocity[0], self.velocity[1])\n\t\n\tdef update_direction(self, direction):\n\t\tif self.direction + direction == 1 or self.direction + 
direction == 5:\n\t\t\treturn\n\t\n\t\tself.direction = direction\n\n\tdef check_body_collision(self):\n\t\tnum_segments = len(self.body)\n\t\tfor i in range(2, num_segments):\n\t\t\tif self.body[i].colliderect(self.body[0]):\n\t\t\t\treturn True\t\n\t\treturn False\n\n\tdef append_segment(self):\t\t\n\t\tnum_segments = len(self.body)\n\t\ttail = pygame.Rect.copy(self.body[num_segments - 1])\t\n\t\tif num_segments < 2:\t\n\t\t\ttail.move_ip(self.size * -self.velocity[0], self.size * -self.velocity[1])\n\t\telse:\n\t\t\tif self.body[num_segments - 1].left % self.size == 0 and self.body[num_segments - 1].top % self.size != 0:\n\t\t\t\tif self.body[num_segments - 2].top > self.body[num_segments - 1].top:\t\n\t\t\t\t\ttail.move_ip(0, -self.size)\n\t\t\t\telif self.body[num_segments - 2].top < self.body[num_segments - 1].top: \n\t\t\t\t\ttail.move_ip(0, self.size)\t\n\t\t\telif self.body[num_segments - 1].left % self.size != 0 and self.body[num_segments - 1].top % self.size == 0:\n\t\t\t\tif self.body[num_segments - 2].left > self.body[num_segments - 1].left:\n\t\t\t\t\ttail.move_ip(-self.size, 0)\n\t\t\t\telif self.body[num_segments - 2].left < self.body[num_segments - 1].left: \n\t\t\t\t\ttail.move_ip(self.size, 0)\n\t\t\telse:\n\t\t\t\tif self.body[num_segments - 2].left > self.body[num_segments - 1].left:\n\t\t\t\t\ttail.move_ip(-self.size, 0)\n\t\t\t\telif self.body[num_segments - 2].left < self.body[num_segments - 1].left: \n\t\t\t\t\ttail.move_ip(self.size, 0)\n\t\t\t\telif self.body[num_segments - 2].top > self.body[num_segments - 1].top:\t\n\t\t\t\t\ttail.move_ip(0, -self.size)\n\t\t\t\telif self.body[num_segments - 2].top < self.body[num_segments - 1].top: \n\t\t\t\t\ttail.move_ip(0, self.size)\t\n\t\t\t\t\t\n\t\tself.body.append(tail)\t\n\ndef current_milli_time():\n\treturn round(time.time() * 1000)\n\ndef main():\n\tpygame.init()\n\tscreen = pygame.display.set_mode(size=(600,600))\n\tgame_bounds = pygame.Rect(0, 0, 600, 600)\t\n\tsnake = Snake()\n\tgame = Game()\n\tupdate_rate = 2 \n\n\tlast_tick = current_milli_time() \n\tdirection_queue = []\t\n\twhile 1:\n\t\tcurrent_tick = current_milli_time()\n\t\tpressed_keys = pygame.key.get_pressed()\n\n\t\tif snake.body[0].colliderect(game.apple_rect):\n\t\t\tgame.points += 1\n\t\t\tgame.reset_apple()\n\t\t\tvalid_pos = False\t\n\t\t\twhile not valid_pos:\t\n\t\t\t\tvalid_pos = True\t\n\t\t\t\tfor r in snake.body:\n\t\t\t\t\tif r.colliderect(game.apple_rect):\n\t\t\t\t\t\tgame.reset_apple()\n\t\t\t\t\t\tvalid_pos = False\n\t\t\tsnake.append_segment()\n\t\t\n\t\tif not game_bounds.contains(snake.body[0]) or snake.check_body_collision():\n\t\t\tsnake = Snake()\n\t\t\tgame = Game()\n\t\t\tlast_tick = current_milli_time()\n\t\t\tdirection_queue = []\t\n\t\t\tcontinue\n\t\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\telif event.type == pygame.KEYDOWN and len(direction_queue) < 2:\n\t\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\t\tdirection_queue.append(0)\t\n\t\t\t\telif event.key == pygame.K_LEFT:\n \t\t\t\t\tdirection_queue.append(1) \n\t\t\t\telif event.key == pygame.K_UP:\n \t\t\t\t\tdirection_queue.append(2) \n\t\t\t\telif event.key == pygame.K_DOWN:\n\t\t\t\t\tdirection_queue.append(3)\n\t\t\t\t\n\t\tif current_tick - last_tick > update_rate:\n\t\t\tif len(direction_queue) > 0:\n\t\t\t\tsnake.update_direction(direction_queue[0])\n\t\t\t\n\t\t\tif snake.update_velocity() and len(direction_queue) > 0:\n\t\t\t\tdirection_queue.pop(0)\n\t\t\t\n\t\t\tsnake.move()\n\t\t\tlast_tick = 
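# The guard above rejects reversals because opposite directions were encoded
# to sum to 1 (right/left) or 5 (up/down). The same check with explicit
# velocity vectors makes the intent self-documenting; a sketch only:
DIRS = {0: (1, 0), 1: (-1, 0), 2: (0, -1), 3: (0, 1)}  # right, left, up, down

def is_reversal(current, proposed):
    cx, cy = DIRS[current]
    px, py = DIRS[proposed]
    return (px, py) == (-cx, -cy)  # proposed vector is the exact opposite

assert is_reversal(0, 1) and is_reversal(2, 3) and not is_reversal(0, 2)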
current_milli_time()\n\t\tscreen.fill((0,0,0))\n\t\tfor segment in snake.body:\t\n\t\t\tpygame.draw.rect(screen, (255, 255, 255), segment)\n\t\tpygame.draw.rect(screen, (255, 0, 0), game.apple_rect)\t\n\t\tpygame.display.flip()\n \n\n\nif __name__==\"__main__\":\n\tmain()\n","repo_name":"gappadyl/snake","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":5575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73296441916","text":"\ndef get_init_list(string):\n l = [ord(c) for c in string]\n l += [17, 31, 73, 47, 23]\n return l\n\n\ndef reverse_sublist(l_orig, index, length):\n right = index + length\n l_extended = l_orig * (int(right / len(l_orig) + 1))\n sublist = list(reversed(l_extended[index:right]))\n for i in range(length):\n l_orig[(i + index) % len(l_orig)] = sublist[i]\n\n\ndef get_sparse_hash(source):\n baselist = range(256)\n skip = 0\n pos = 0\n for turn in range(64):\n for length in source:\n reverse_sublist(baselist, pos, length)\n pos = (pos + length + skip) % len(baselist)\n skip += 1\n return baselist\n\ndef get_dense_hash(sparse_hash):\n dense = []\n i = 0\n while i < 256:\n c = sparse_hash[i]\n j = 0\n while j < 15:\n j+=1\n c ^= sparse_hash[i+j]\n dense.append(c)\n i+=16\n return dense\n\n\ndef get_hash(string):\n l = get_init_list(string)\n h = get_sparse_hash(l)\n h = get_dense_hash(h)\n h = \"\".join([\"%02X\" % x for x in h]).lower()\n return h\n\n\n# original\nf = open(\"../input/day10.txt\")\nlengths = f.read().strip()\nf.close()\n\n#lengths = \"AoC 2017\"\n#lengths = \"\"\nh = get_hash(lengths)\nprint(h)\n","repo_name":"Turysaz/advent_of_python_2017","sub_path":"day10/day10.1.py","file_name":"day10.1.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26690820174","text":"from OpenGL import GL as gl\r\nimport numpy as np\r\nimport ctypes\r\nimport math\r\n\r\n\r\nclass Sphere:\r\n \"\"\"\r\n Mesh of an uv sphere, based on http://www.songho.ca/opengl/gl_sphere.html#sphere\r\n\r\n Implements vertex coordinates, normals and texture coordinates.\r\n \"\"\"\r\n def __init__(self, offset, segments, rings, radius):\r\n \"\"\"\r\n Constructor.\r\n\r\n Args:\r\n offset: tuple (x, y, z) specifying the offset to apply on each vertex coordinates.\r\n segments: the number of meridians\r\n rings: the number of parallels. 
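# get_dense_hash in the knot-hash solution above XORs each 16-value block of
# the sparse hash. A compact equivalent using functools.reduce over slices;
# illustrative, but it yields the same 16 outputs for a 256-element list:
from functools import reduce

def dense_hash(sparse):
    return [reduce(lambda a, b: a ^ b, sparse[i:i + 16]) for i in range(0, 256, 16)]

# XOR over any run 16k..16k+15 cancels: the high bits repeat 16 (even) times
# and the low nibble covers 0..15, whose XOR is 0.
assert dense_hash(list(range(256))) == [0] * 16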
Usually half the number of segments.\r\n radius: the radius of the sphere.\r\n \"\"\"\r\n\r\n self.offset_x, self.offset_y, self.offset_z = offset\r\n self.segments = segments\r\n self.rings = rings\r\n self.radius = radius\r\n\r\n self.vertices = []\r\n self.normals = []\r\n self.textures = []\r\n self.indices = []\r\n\r\n self._buildMesh()\r\n self._sendToGPU()\r\n\r\n def _buildMesh(self):\r\n segmentStep = 2*math.pi/self.segments\r\n ringStep = math.pi/self.rings\r\n\r\n for i in range(0, self.rings+1):\r\n u = math.pi/2-i*ringStep\r\n y = self.radius * math.sin(u) # z\r\n\r\n for j in range(0, self.segments+1):\r\n v = j * segmentStep\r\n z = self.radius * math.cos(u) * math.cos(v) # x\r\n x = self.radius * math.cos(u) * math.sin(v) # y\r\n\r\n self.vertices.extend([x+self.offset_x, y+self.offset_y, z+self.offset_z])\r\n self.normals.extend([c/self.radius for c in [x, y, z]])\r\n self.textures.extend([j/self.segments, i/self.rings])\r\n\r\n # indices\r\n # k1--k1+1\r\n # | / |\r\n # | / |\r\n # k2--k2+1\r\n\r\n for i in range(0, self.rings):\r\n k1 = i * (self.segments+1)\r\n k2 = k1+self.segments+1\r\n for j in range(0, self.segments):\r\n if i != 0:\r\n self.indices.extend([k1, k2, k1+1])\r\n if i != self.rings-1:\r\n self.indices.extend([k1+1, k2, k2+1])\r\n k1, k2 = k1+1, k2+1\r\n\r\n def _sendToGPU(self):\r\n interleavedData = []\r\n count = len(self.vertices)/3\r\n\r\n if not math.isclose(count, round(count)):\r\n raise RuntimeError(\"Number of vertices is not a multiple of 3 : {} != {}.\".format(count, round(count)))\r\n count = round(count)\r\n for i in range(count):\r\n interleavedData.extend(self.vertices[i*3:(i+1)*3])\r\n interleavedData.extend(self.normals[i*3:(i+1)*3])\r\n interleavedData.extend(self.textures[i*2:(i+1)*2])\r\n\r\n self.VAO = gl.glGenVertexArrays(1)\r\n self.VBO = gl.glGenBuffers(1)\r\n self.EBO = gl.glGenBuffers(1)\r\n\r\n gl.glBindVertexArray(self.VAO)\r\n\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.VBO)\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, np.array(interleavedData, dtype='float32'), gl.GL_STATIC_DRAW)\r\n\r\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.EBO)\r\n gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, np.array(self.indices, dtype='uint32'), gl.GL_STATIC_DRAW)\r\n\r\n stride = 8*4\r\n gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, False, stride, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(0)\r\n\r\n gl.glVertexAttribPointer(1, 3, gl.GL_FLOAT, False, stride, ctypes.c_void_p(3*4))\r\n gl.glEnableVertexAttribArray(1)\r\n\r\n gl.glVertexAttribPointer(2, 2, gl.GL_FLOAT, False, stride, ctypes.c_void_p(6*4))\r\n gl.glEnableVertexAttribArray(2)\r\n\r\n gl.glBindVertexArray(0)\r\n\r\n def draw(self):\r\n \"\"\"\r\n Draw the mesh\r\n \"\"\"\r\n gl.glBindVertexArray(self.VAO)\r\n gl.glDrawElements(gl.GL_TRIANGLES, len(self.indices), gl.GL_UNSIGNED_INT, None)\r\n\r\n def drawInstanced(self, number):\r\n gl.glBindVertexArray(self.VAO)\r\n gl.glDrawElementsInstanced(gl.GL_TRIANGLES, len(self.indices), gl.GL_UNSIGNED_INT, None, number)\r\n","repo_name":"ThomasChevalier/attenuationCalc","sub_path":"opengl/Sphere.py","file_name":"Sphere.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"16594663711","text":"#!/usr/bin/python3\n\nimport re\nfrom glob import glob\nfrom copy import deepcopy\nfrom math import log\nfrom arguments import get_arguments\n\nDOC_DIR = 'test_docs'\nNOT_WORD_RE = '[^\\w]'\nops = {'and': set.intersection,\n 'or': set.union,\n 
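# _sendToGPU above interleaves position (3 floats), normal (3 floats) and
# texture coordinates (2 floats) per vertex, hence stride = 8 floats = 32
# bytes and attribute byte offsets 0, 12 and 24 -- the values passed to the
# three glVertexAttribPointer calls. A quick arithmetic check of that layout:
FLOAT_SIZE = 4
layout = [('position', 3), ('normal', 3), ('texcoord', 2)]
stride = sum(n for _, n in layout) * FLOAT_SIZE
offsets, acc = {}, 0
for name, n in layout:
    offsets[name] = acc * FLOAT_SIZE
    acc += n
assert stride == 32 and offsets == {'position': 0, 'normal': 12, 'texcoord': 24}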
'not': set.difference}\nargs = get_arguments()\n\n\ndef idf(term, docs):\n N = len(docs)\n df = count_docs_containing_term(term, [doc[1] for doc in docs])\n if df == 0:\n return 0\n else:\n return log(N/df)\n\n\ndef tf(term, doc):\n count = 0\n for word in doc:\n if word == term:\n count += 1\n return count\n\n\ndef count_docs_containing_term(term, docs):\n count = 0\n for doc in docs:\n if doc_contains_term(term, doc):\n count += 1\n return count\n\n\ndef doc_contains_term(term, doc):\n for word in doc:\n if word == term:\n return True\n\n \ndef load_docs(directory, dictionary):\n docs = []\n i = 0\n for filename in glob(directory + '/*'):\n i += 1\n if i > 2:\n break\n with open(filename) as readfile:\n docs.append((filename, tokenize(readfile.read(), dictionary)))\n return docs\n\n\ndef tokenize(doc, dictionary):\n doc = doc.replace('\\n', '').lower()\n doc = re.sub(NOT_WORD_RE, ' ', doc).split()\n doc = list(filter(lambda w: w in dictionary, doc))\n return doc\n\n\ndef make_postings_list(docs):\n postings_list= dict()\n idf_list = dict()\n for doc_id, doc in docs:\n count = 0\n for token in set(doc):\n if token not in idf_list:\n idf_list[token] = idf(token, docs)\n if postings_list.get(token):\n postings_list[token][doc_id] = tf(token, doc)\n else:\n count += 1\n postings_list[token] = {doc_id: tf(token, doc) * idf_list[token],\n 'idf': idf_list[token]}\n print(count)\n return postings_list\n\n\ndef get_query(prompt):\n return parse_query(input(prompt))\n\n \ndef parse_query(query):\n return query.lower().split(' ')\n\n \ndef infix_postfix(tokens):\n output = []\n stack = []\n for item in tokens:\n if item in ops:\n while stack:\n output.append(stack.pop())\n stack.append(item)\n else:\n output.append(item)\n while stack:\n output.append(stack.pop())\n return output\n\n\ndef binary_retrieve(postings_list, query, num_docs):\n all_docs = set()\n for term in postings_list:\n all_docs |= postings_list[term]\n pages_found = set()\n def _binary_retrieve(query):\n op = query.pop()\n if op == 'not':\n return all_docs - _binary_retrieve(query)\n elif op == 'and' or op == 'or':\n op1 = _binary_retrieve(query)\n op2 = _binary_retrieve(query)\n return ops[op](op1, op2)\n else:\n found = postings_list.get(op)\n if found:\n return found\n else:\n return set()\n \n return _binary_retrieve(query)\n\n\n\ndef ranked_retrieve(postings_list, query, max_retrieve=10):\n scores = dict()\n for term in query:\n postings_of_term = postings_list.get(term)\n if postings_of_term:\n term_weight = postings_of_term['idf']\n for doc_id, weight in postings_list[term].items():\n if scores.get(doc_id):\n scores[doc_id] += weight * term_weight\n else:\n scores[doc_id] = weight * term_weight\n return scores\n \n\ndef save_postings_list(filename, postings_list):\n with open(filename, 'w') as postings_list_file:\n for token, val in postings_list.items():\n postings_list_file.write('%s, ' % token)\n postings_list_file.write('%s, ' % val['idf'])\n for doc_id, tf in val.items():\n if doc_id != 'idf':\n postings_list_file.write('%s:%s ' % (doc_id, tf))\n postings_list_file.write('\\n')\n\n\nif __name__ == '__main__':\n print('Reading docs...')\n dictionary = set()\n with open('vocabulary.txt') as vocab:\n for word in vocab:\n dictionary.add(word.strip())\n docs = load_docs(DOC_DIR, dictionary)\n print('Done.')\n print('Generating postings list...')\n postings_list = make_postings_list(docs)\n print('Done.')\n print('Saving postings list...')\n filename = 'postings.post'\n save_postings_list(filename, postings_list)\n query = 
get_query('Enter a search query (Enter to quit): ')\n if args.b:\n query = infix_postfix(query)\n retrieved = binary_retrieve(postings_list, deepcopy(query), len(docs))\n print('Found documents: %d' % len(retrieved))\n query = get_query('Enter a search query (Enter to quit): ')\n else:\n retrieved = ranked_retrieve(postings_list, query, max_retrieve=10)\n for document_id, score in retrieved.items():\n print('%s. %s' % (document_id, score))\n \n","repo_name":"korzorro/information-retrieval","sub_path":"indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10085670656","text":"import random\r\n\r\nquestions = [\r\n {\r\n \"question\": \"What is the capital of France?\",\r\n \"correct_answer\": \"Paris\",\r\n \"incorrect_answers\": [\"Berlin\", \"Madrid\", \"Rome\"]\r\n },\r\n {\r\n \"question\": \"Which planet is known as the Red Planet?\",\r\n \"correct_answer\": \"Mars\",\r\n \"incorrect_answers\": [\"Venus\", \"Jupiter\", \"Saturn\"]\r\n },\r\n {\r\n \"question\": \"What is 2 + 2?\",\r\n \"correct_answer\": \"4\",\r\n \"incorrect_answers\": [\"3\", \"5\", \"6\"]\r\n }\r\n]\r\n\r\n\r\ndef ask_question(question_dict):\r\n print(question_dict[\"question\"])\r\n options = [question_dict[\"correct_answer\"]] + \\\r\n question_dict[\"incorrect_answers\"]\r\n random.shuffle(options)\r\n\r\n for i, option in enumerate(options, start=1):\r\n print(f\"{i}. {option}\")\r\n\r\n user_choice = int(input(\"Enter the number of your answer: \")) - 1\r\n if options[user_choice] == question_dict[\"correct_answer\"]:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef calculate_score(questions_list):\r\n score = 0\r\n for question in questions_list:\r\n if ask_question(question):\r\n score += 1\r\n print()\r\n return score\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Welcome to the Quiz Game!\")\r\n print(\"You will be presented with a series of questions. Choose the correct answer.\")\r\n print(\"Let's begin!\\n\")\r\n\r\n user_score = calculate_score(questions)\r\n\r\n print(f\"Game Over! 
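# A self-contained toy of the weighting scheme used by the indexer above:
# tf is the raw term count and idf = ln(N / df). Documents are illustrative.
from math import log

docs = {'d1': ['apple', 'banana', 'apple'], 'd2': ['banana', 'cherry']}

def idf(term):
    df = sum(term in words for words in docs.values())
    return log(len(docs) / df) if df else 0.0

def tfidf(term, doc_id):
    return docs[doc_id].count(term) * idf(term)

assert tfidf('apple', 'd1') == 2 * log(2)  # two occurrences, in 1 of 2 docs
assert tfidf('banana', 'd1') == 0.0        # in every doc, so idf is 0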
Your total score is: {user_score}/{len(questions)}\")\r\n","repo_name":"Ishankuukey/ARCANEGIT","sub_path":"quiz_game.py","file_name":"quiz_game.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"27015922764","text":"def shiftRowLeft(row):\n c = row.count(0)\n while 0 in row:\n row.remove(0)\n for i in range(c):\n row.append(0)\n\ndef shiftRowRight(row):\n c = row.count(0)\n while 0 in row:\n row.remove(0)\n for i in range(c):\n row.insert(0,0)\n\ndef mergeLeft(row):\n freeNdx = 0\n for i in range(1, 4):\n if row[i] == row[i - 1]:\n row[i - 1] = row[i] * 2\n row[i] = 0\n freeNdx = i\n break\n shiftRowLeft(row)\n for i in range(freeNdx + 1, 4):\n if row[i] == row[i - 1]:\n row[i - 1] = row[i] * 2\n row[i] = 0\n break\n shiftRowLeft(row)\n\ndef mergeRight(row):\n freeNdx = 3\n for i in range(2, -1, -1):\n if row[i] == row[i + 1]:\n row[i + 1] = row[i] * 2\n row[i] = 0\n freeNdx = i\n break\n shiftRowRight(row)\n for i in range(freeNdx-1, -1,-1):\n if row[i] == row[i + 1]:\n row[i + 1] = row[i] * 2\n row[i] = 0\n break\n shiftRowRight(row)\n\nw, h = 4, 4\nmatrix = [[0 for x in range(w)] for y in range(h)]\n\nfor i in range(4):\n line = list(map(int, input().split()))\n for j in range(4):\n matrix[i][j] = line[j]\ndirection = int(input())\nif direction == 0:\n for i in range(4):\n shiftRowLeft(matrix[i])\n mergeLeft(matrix[i])\nelif direction == 1:\n matrix = list(zip(*matrix))\n for i in range(4):\n matrix[i] = list(matrix[i])\n for i in range(4):\n shiftRowLeft(matrix[i])\n mergeLeft(matrix[i])\n matrix = list(zip(*matrix))\nelif direction == 2:\n for i in range(4):\n shiftRowRight(matrix[i])\n mergeRight(matrix[i])\nelse:\n matrix = list(zip(*matrix))\n for i in range(4):\n matrix[i] = list(matrix[i])\n for i in range(4):\n shiftRowRight(matrix[i])\n mergeRight(matrix[i])\n matrix = list(zip(*matrix))\n\nfor i in range(4):\n for j in range(3):\n print(matrix[i][j], end=\" \")\n print(matrix[i][3])","repo_name":"austinkeil/KattisSolutions","sub_path":"2048.py","file_name":"2048.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"16912115930","text":"'''\n4구역\n----------------\nT\nN\nNxN 당근의 개수\n---------------\n'''\ndef rotate_90(m):\n N = len(m)\n ret = [[0] * N for _ in range(N)]\n for r in range(N):\n for c in range(N):\n ret[c][N-1-r] = m[r][c]\n return ret\n\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input().strip())\n carrot = [list(map(int, input().strip().split())) for _ in range (N)]\n\n max_carrot = 0\n min_carrot = 1000000\n for x in range(4):\n if x != 0:\n carrot = rotate_90(carrot)\n i = 0\n temp = 0\n while N-2*i > 0:\n temp += sum(carrot[i][i:N-i])\n i += 1\n max_carrot = temp if temp > max_carrot else max_carrot\n min_carrot = temp if temp < min_carrot else min_carrot\n print(f'#{tc} {max_carrot-min_carrot}')\n","repo_name":"jhj9109/TIL","sub_path":"swea/SC/8797/8797.py","file_name":"8797.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"21662209445","text":"flag = True\n\nfirst = int(input(\"введите первое число: \"))\nsec = int(input(\"введите второе число: \"))\ncom = input(\"введите операцию для вашей пары числел: \")\n\nwhile flag:\n\tif com == \"*\":\n\t\tres = first*sec\n\telif com == \"/\" and sec != 0:\n\t\tres = first/sec\n\telif com == 
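# The Russian-prompt calculator above dispatches on the operator with an
# if/elif chain. A dict of functions from the operator module is a compact
# alternative that keeps the same zero-division guard; a sketch only:
import operator

OPS = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}

def apply_op(op, a, b):
    if op == '/' and b == 0:
        return None  # mirrors the `sec != 0` guard in the original loop
    fn = OPS.get(op)
    return fn(a, b) if fn else None

assert apply_op('*', 3, 4) == 12 and apply_op('/', 1, 0) is None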
\"+\":\n\t\tres = first+sec\n\telif com == \"-\":\n\t\tres = first-sec\n\telse:\n\t\tres = None\n\n\tprint(\"-->> \" , res)\n\tfirst = res\n\tsec = int(input(\"введите второе число: \"))\n\t\n\tcom = input(\"введите операцию или stop если не хотите продолжать: \")\n\tif com == \"stop\":\n\t\tflag = False\n\t\n","repo_name":"roman0810/PyLabs","sub_path":"lab1/lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"13417674223","text":"#imports\nimport json\n#difflib is a standard python library\n#used to get the similarity between two strings\nfrom difflib import get_close_matches\n\n#make a variable called data to open the file data.json\n#data.json contains key:value pairs for english words. \n#the datatype is python dictionary\n#the key is the word and the value is the meaning\ndata=json.load(open(\"data.json\"))\n\n#make a function that would take a string parameter\n#find the value of the word from the dictionary \ndef findMeaning(word):\n #make sure all the characters are lowercase\n word = word.lower()\n #if the word is found in the dictionary or data.json\n #then return the value of the word\n if word in data: \n return data[word]\n #to return the definition of words that start with a capital letter example Delhi or Paris\n elif word.title() in data:\n return data[word.title()]\n #to return the definition of acronyms (e.g. USA or NATO)\n elif word.upper() in data:\n return data[word.upper()]\n #if the word is close to another word found on the data\n #ask the user if they meant that word instead\n elif len(get_close_matches(word, data.keys())) > 0:\n yn = input(\"Did you mean %s instead? Enter Y if Yes or N if No: \" % get_close_matches(word,data.keys())[0])\n if yn == \"Y\":\n return data[get_close_matches(word,data.keys())[0]]\n elif yn == \"N\":\n return \"The word doesn't exist. Please double check the spelling\"\n else:\n return \"We did not understand your entry\"\n\n #if the word is not found then return a message\n else: \n return \"The word doesn't exist. 
Please double check the spelling\"\n\n \n\nwhile True: \n#take in an user input\n word = input(\"Enter word: \")\n\n#print out the meaning of the word the user entered\n output = findMeaning(word)\n if type(output) == list:\n for item in output:\n print(item)\n else:\n print(output)\n","repo_name":"nabeehanuba11/EnglishThesaurus","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"22548739241","text":"import time\r\n\r\nfrom datetime import datetime, date, timedelta as td\r\n\r\nimport pymysql\r\nimport redis\r\nimport json\r\nimport jieba\r\nimport codecs\r\n\r\n_KEY_EVENT_CHANNEL = 'twitter:sg:event:python:weibo:ab_test:a'\r\n_KEY_EVENT_PREFIX = _KEY_EVENT_CHANNEL + ':'\r\n_KEY_EVENT_KEYWORDS = _KEY_EVENT_PREFIX + 'keywords'\r\n_KEY_EVENT_KEYWORDS_TRACK2 = _KEY_EVENT_PREFIX + 'track2'\r\n_KEY_EVENT_KEYWORDS_WORDCLOUD = _KEY_EVENT_PREFIX + 'wordcloud'\r\n\r\nclass Cloud:\r\n\r\n def __init__(self):\r\n\r\n # self.start = start\r\n #\r\n # self.end = end\r\n\r\n self.delta = td(seconds=10)\r\n\r\n self.lag = td(minutes=2 * 60 + 30)\r\n\r\n self.host = '10.0.109.33'\r\n self.user = 'clear'\r\n self.db = 'clear'\r\n self.charset = 'utf8'\r\n\r\n self.connection = pymysql.connect(host=self.host, user=self.user, db=self.db, charset=self.charset, passwd='ClearDataBase2017^')\r\n\r\n def currentid(self):\r\n try:\r\n db = redis.StrictRedis('10.0.109.33', port=8181)\r\n nextid = db.get(_KEY_EVENT_PREFIX + 'nextId')\r\n id = int(nextid)\r\n except:\r\n print('redis connection failure')\r\n return id\r\n\r\n def info(self,id):\r\n kws =[]\r\n obj={}\r\n print(id)\r\n try:\r\n db = redis.StrictRedis('10.0.109.33', port=8181)\r\n content = db.get(_KEY_EVENT_PREFIX + str(id)).decode('utf-8')\r\n dic = json.loads(content)\r\n obj['kws'] = dic['info']['words']\r\n obj['tm1']= dic['summary']['tweet_partitions_count']['timestamps'][0]\r\n obj['tm2'] = dic['summary']['tweet_partitions_count']['timestamps'][-1]\r\n obj['id'] = id\r\n obj['timestamps']= dic['summary']['tweet_partitions_count']['timestamps']\r\n obj['counts'] = dic['summary']['tweet_partitions_count']['counts']\r\n obj['tweets']=dic ['summary']['original_tweets']\r\n\r\n # kws.append(tm1)\r\n # kws.append(tm2)\r\n # print('kws etc',obj)\r\n except:\r\n print('redis connection failure')\r\n db = redis.StrictRedis('10.0.109.33', port=8181)\r\n content = db.get(_KEY_EVENT_PREFIX + str(id)).decode('utf-8')\r\n dic = json.loads(content)\r\n obj['kws'] = dic['info']['words']\r\n obj['tm1']= dic['summary']['tweet_partitions_count']['timestamps'][0]\r\n obj['tm2'] = dic['summary']['tweet_partitions_count']['timestamps'][-1]\r\n obj['id'] = id\r\n obj['timestamps']= dic['summary']['tweet_partitions_count']['timestamps']\r\n obj['counts'] = dic['summary']['tweet_partitions_count']['counts']\r\n obj['tweets']=dic ['summary']['original_tweets']\r\n return obj\r\n\r\n def topkauthor(self,id,k):\r\n timestamps = self.info(id)['timestamps']\r\n counts = self.info(id)['counts']\r\n tweets = self.info(id)['tweets']\r\n rankdict = {}\r\n for item in counts:\r\n rankdict[item]= sum(counts[item])\r\n ranksorted = sorted(rankdict.items(), key=lambda x: x[1], reverse=True)\r\n topk = ranksorted[0:k]\r\n print(topk)\r\n print(type(topk))\r\n print(len(topk))\r\n authors=[]\r\n for ak in range(min(k,len(topk))):\r\n tweet = topk[ak][0]\r\n if topk[ak][1]>1:\r\n # if (tweet in tweets):\r\n 
authors.append(str(tweets[tweet][2]))\r\n print(authors)\r\n return authors\r\n\r\n def getauthorsinfo(self,authors):\r\n try:\r\n conn = pymysql.connect(host='123.56.187.168', user='zhouying', db='Chinese_stream', charset='utf8',\r\n passwd='Yingzhou765')\r\n\r\n cursor = conn.cursor()\r\n print(','.join(authors))\r\n sql_txt = 'select * from users where id in ('+ ','.join(authors) +')'\r\n print(sql_txt)\r\n cursor.execute(sql_txt)\r\n ret = cursor.fetchall()\r\n except:\r\n time.sleep(900)\r\n conn = pymysql.connect(host='123.56.187.168', user='zhouying', db='Chinese_stream', charset='utf8',\r\n passwd='Yingzhou765')\r\n\r\n cursor = conn.cursor()\r\n print(','.join(authors))\r\n sql_txt = 'select * from users where id in ('+ ','.join(authors) +')'\r\n print(sql_txt)\r\n cursor.execute(sql_txt)\r\n ret = cursor.fetchall()\r\n finally:\r\n # print res\r\n cursor.close()\r\n conn.close()\r\n return ret\r\n\r\n def insertauthorsinfo(self,ret):\r\n conn = pymysql.connect(host='10.0.109.33', user='clear', db='clear', charset='utf8', passwd='ClearDataBase2017^')\r\n cursor = conn.cursor()\r\n for row in ret:\r\n ls = list(row)\r\n ls[3]=ls[3].replace('\\'','')\r\n ls[-2]=ls[-2].replace('\\'','')\r\n newrow = tuple(ls)\r\n sql_insert = \"replace into users_all values('%d','%s','%s','%s','%s','%s','%d','%d','%d','%d','%s','%d','%s','%d','%d','%s','%s','%s','%s','%s','%s')\" % newrow\r\n # print(sql_insert)\r\n cursor.execute(sql_insert)\r\n conn.commit()\r\n cursor.close()\r\n conn.close()\r\n\r\n def relatedwb(self,id):\r\n ret = []\r\n # try:\r\n print('closing old connection...')\r\n self.connection.close()\r\n print('sleep 2 seconds')\r\n time.sleep(2)\r\n kw = self.info(id)['kws']\r\n t0 = self.info(id)['tm1']\r\n t1 = self.info(id)['tm2']\r\n print(kw)\r\n print(type(kw))\r\n str = '(instr(text,\\''+ kw[0]+'\\')>0)'\r\n # print(str)\r\n for k in range(1,len(kw)):\r\n app = '+(instr(text,\\''+ kw[k]+'\\')>0)'\r\n str += app\r\n print(str)\r\n sqlstr = 'SELECT text FROM clear.timelines2 where' + str + '>1 and created_at >\\'' +t0 +'\\' and created_at <\\'' +t1 +'\\''\r\n print(sqlstr)\r\n print('trying to reconnect db...')\r\n try:\r\n self.connection = pymysql.connect(host=self.host, user=self.user, db=self.db, charset=self.charset,\r\n passwd='ClearDataBase2017^')\r\n print('successful connection!')\r\n cursor = self.connection.cursor()\r\n cursor.execute(sqlstr)\r\n ret = cursor.fetchall()\r\n print(len(ret))\r\n cursor.close()\r\n except:\r\n self.connection = pymysql.connect(host=self.host, user=self.user, db=self.db, charset=self.charset,\r\n passwd='ClearDataBase2017^')\r\n print('successful connection!')\r\n cursor = self.connection.cursor()\r\n cursor.execute(sqlstr)\r\n ret = cursor.fetchall()\r\n print(len(ret))\r\n cursor.close()\r\n return ret\r\n\r\n def getString(self, tweets, stopwords):\r\n txtstr = ''\r\n for row in tweets:\r\n tweet = row[0]\r\n try:\r\n txt = tweet.split(':')[1].replace('http', '').replace('\\n', ' ')\r\n except:\r\n txt = tweet\r\n\r\n txtstr = txtstr + ' ' + txt\r\n\r\n worddict = cloud.count(txtstr, stopwords)\r\n return worddict\r\n\r\n\r\n def stopword(self, filename):#delete stopword\r\n stopwords = set()\r\n with codecs.open(filename, 'r' ,'utf-8') as f:\r\n for line in f:\r\n stopwords.add(line.rstrip())\r\n # # f = open(filename, 'r')\r\n # line = f.readline().rstrip()\r\n # while line:\r\n # # stopwords.setdefault(line, 0)\r\n # # print(line)\r\n # stopwords.add(line)\r\n # line = f.readline().rstrip()\r\n # f.close()\r\n return 
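# topkauthor above sums per-tweet counts and sorts the dict to take the k
# largest. collections.Counter.most_common performs the same ranking
# directly; a toy sketch with illustrative keys:
from collections import Counter

counts = Counter({'tweet_a': 5, 'tweet_b': 1, 'tweet_c': 3})
assert counts.most_common(2) == [('tweet_a', 5), ('tweet_c', 3)]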
stopwords\r\n\r\n\r\n def count(self, txtstr, stopwords):\r\n seg_generator = jieba.cut(txtstr) # split word\r\n print(seg_generator)\r\n seg_list = [i for i in seg_generator if i not in stopwords]\r\n seg_list = [i for i in seg_list if i != u' ']\r\n seg_list = r' '.join(seg_list)\r\n\r\n worddict = {}\r\n wordlist = seg_list.split(' ')\r\n for word in wordlist: # calculate word count\r\n # print word\r\n # word = word.encode('utf-8')\r\n if word not in worddict:\r\n worddict[word] = 1\r\n else:\r\n worddict[word] = worddict[word] + 1\r\n\r\n return worddict\r\n\r\n\r\n\r\n def analysis(self,id,n):\r\n # cloud = Cloud()\r\n stopwords = self.stopword('stopwords.txt')\r\n res = self.relatedwb(id)\r\n worddict = self.getString(res, stopwords)\r\n worddict[\"\\u200b\"]=0\r\n dictlist = sorted(worddict.items(), key=lambda x: x[1], reverse=True)\r\n maxcount = dictlist[0][1]\r\n # print(dictlist)\r\n result = dictlist[0: n]\r\n finallist = {}\r\n count_key = 0\r\n for k,v in dictlist:\r\n v = v*40.0/maxcount\r\n if v>=10:\r\n finallist[k]=v\r\n count_key += 1\r\n #finalsort = sorted(finallist.items(), key=lambda x: x[1], reverse=True)\r\n\r\n # print('result:',result[0])\r\n # print('finaldict',finallist)\r\n try:\r\n db = redis.StrictRedis('10.0.109.33', port=8181)\r\n json_obj = json.dumps(finallist)\r\n cloudkey = _KEY_EVENT_KEYWORDS_WORDCLOUD + ':'+str(id)\r\n db.set(cloudkey, json_obj)\r\n print(db.get(cloudkey))\r\n except:\r\n db = redis.StrictRedis('10.0.109.33', port=8181)\r\n json_obj = json.dumps(finallist)\r\n cloudkey = _KEY_EVENT_KEYWORDS_WORDCLOUD + ':'+str(id)\r\n db.set(cloudkey, json_obj)\r\n print(db.get(cloudkey))\r\n #return result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n cloud=Cloud()\r\n n=15\r\n # for id in range(currentid - 5, currentid + 1):\r\n # authors = cloud.topkauthor(id, 4)\r\n # ainfo = cloud.getauthorsinfo(authors)\r\n # cloud.insertauthorsinfo(ainfo)\r\n # print time.strftime('%y-%m-%d %H:%M:%S', time.localtime(time.time()))\r\n # print 'finish'\r\n\r\n while True:\r\n currentid = cloud.currentid()\r\n print('current id is: %s' % (currentid))\r\n # for id in range(currentid-5, currentid+1):\r\n for id in range(currentid-40, currentid+1):\r\n cloud.analysis(id,n)\r\n authors = cloud.topkauthor(id, 4)\r\n if (authors==[]):\r\n print('eid',id,'no author,skip')\r\n else:\r\n ainfo = cloud.getauthorsinfo(authors)\r\n cloud.insertauthorsinfo(ainfo)\r\n print(time.strftime('%y-%m-%d %H:%M:%S', time.localtime(time.time())))\r\n print('eid',id,'done')\r\n\r\n print(\"finish,sleep 15 min\")\r\n time.sleep(900)\r\n","repo_name":"guo-ganggang/event-driven-gainiangu","sub_path":"1_weibo_hot_events_detection/preprocessing_events/wordcloud/wordcloud+authorurl.py","file_name":"wordcloud+authorurl.py","file_ext":"py","file_size_in_byte":10874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"6746694703","text":"def get_dict_input():\n try:\n dict_str = input(\"Enter a dictionary in the format {'key1': value1, 'key2': value2, ...}: \")\n dictionary = eval(dict_str) # Parse the input string as a dictionary\n if not isinstance(dictionary, dict):\n raise ValueError(\"Invalid input. Please enter a valid dictionary.\")\n return dictionary\n except (SyntaxError, ValueError):\n print(\"Invalid input format. 
Please enter a dictionary in the correct format.\")\n return None\n\ndef intersection(dict1, dict2):\n return {key: dict1[key] for key in dict1 if key in dict2}\n\ndef union(dict1, dict2):\n result = dict1.copy()\n result.update(dict2)\n return result\n\ndef test_demorgans_laws(dict1, dict2):\n # De Morgan's Laws:\n # 1. not (A union B) == (not A) intersection (not B)\n law1_lhs = union(dict1, dict2)\n law1_rhs = intersection(dict1, dict2)\n is_law1_satisfied = law1_lhs == law1_rhs\n\n # 2. not (A intersection B) == (not A) union (not B)\n law2_lhs = intersection(dict1, dict2)\n law2_rhs = union(dict1, dict2)\n is_law2_satisfied = law2_lhs == law2_rhs\n\n return is_law1_satisfied, is_law2_satisfied\n\n# Input\nprint(\"Enter the first dictionary:\")\ndict1 = get_dict_input()\nprint(\"Enter the second dictionary:\")\ndict2 = get_dict_input()\n\nif dict1 is not None and dict2 is not None:\n # Test De Morgan's Laws\n law1_satisfied, law2_satisfied = test_demorgans_laws(dict1, dict2)\n\n if law1_satisfied and law2_satisfied:\n print(\"The input dictionaries satisfy De Morgan's Laws.\")\n else:\n print(\"The input dictionaries do not satisfy De Morgan's Laws.\")\n","repo_name":"SIBI-MS/Soft-Coputing","sub_path":"fuzzywithdemorgans.py","file_name":"fuzzywithdemorgans.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"28479524913","text":"from typing import Dict\n\nfrom Bio import SeqIO\nfrom frozendict import frozendict\n\n\ndef read_fasta(path) -> Dict[str, str]:\n \"\"\"Read and make immutable.\"\"\"\n out = {}\n with open(path) as handle:\n for record in SeqIO.parse(handle, \"fasta\"):\n out[record.id] = str(record.seq)\n return frozendict(out)\n\n\ndef write_fasta(data, path):\n \"\"\"Write to file.\"\"\"\n with open(path, \"w\") as f:\n for gid, seq in data.items():\n f.write(f\">{gid}\\n{seq}\\n\")\n","repo_name":"aaronmussig/impact-of-contamination-on-taxonomy","sub_path":"workflow/util/fasta.py","file_name":"fasta.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38880942433","text":"import argparse\nimport json\nimport logging\nimport os\nimport tempfile\nfrom typing import Optional\n\nfrom onefuzztypes.enums import ContainerType, TaskType\nfrom onefuzztypes.models import NotificationConfig\nfrom onefuzztypes.primitives import Container\n\nfrom onefuzz.api import Onefuzz\nfrom onefuzz.templates import JobHelper\n\nSETUP = \"\"\"\ncd /onefuzz\napt-get update\napt-get -y install build-essential libbfd-dev libunwind-dev\ngit clone https://github.com/google/honggfuzz/\ncd /onefuzz/honggfuzz\nmake\n\"\"\"\n\n\ndef add_setup_script(of: Onefuzz, container: Container) -> None:\n with tempfile.TemporaryDirectory() as tmpdir:\n setup = os.path.join(tmpdir, \"setup.sh\")\n with open(setup, \"w\") as handle:\n handle.write(SETUP)\n of.containers.files.upload_file(container, setup)\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\"setup_dir\", type=str, help=\"Target setup directory\")\n parser.add_argument(\n \"target_exe\", type=str, help=\"Target executable within setup directory\"\n )\n parser.add_argument(\"project\", type=str, help=\"Name of project\")\n parser.add_argument(\"name\", type=str, help=\"Name of target\")\n parser.add_argument(\"build\", type=str, help=\"Target build version.\")\n 
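# Note on the De Morgan tester above: it compares a union directly against an
# intersection, which only holds when both inputs are equal. The classical
# laws need complements with respect to a universe. A set-based sketch of the
# actual identities, with a toy universe:
U = set(range(10))
A, B = {1, 2, 3}, {3, 4}

assert U - (A | B) == (U - A) & (U - B)  # not(A or B) == (not A) and (not B)
assert U - (A & B) == (U - A) | (U - B)  # not(A and B) == (not A) or (not B)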
parser.add_argument(\"pool_name\", type=str, help=\"VM pool to use\")\n parser.add_argument(\n \"--duration\", type=int, default=1, help=\"Hours to run the fuzzing task\"\n )\n parser.add_argument(\"--inputs\", help=\"seeds to use\")\n parser.add_argument(\"--notification_config\", help=\"Notification configuration\")\n args = parser.parse_args()\n\n notification_config: Optional[NotificationConfig] = None\n if args.notification_config:\n with open(args.notification_config) as handle:\n notification_config = NotificationConfig.parse_obj(json.load(handle))\n\n of = Onefuzz()\n logging.basicConfig(level=logging.WARNING)\n of.logger.setLevel(logging.DEBUG)\n\n helper = JobHelper(\n of,\n of.logger,\n args.project,\n args.name,\n args.build,\n args.duration,\n pool_name=args.pool_name,\n target_exe=args.target_exe,\n )\n\n helper.define_containers(\n ContainerType.setup,\n ContainerType.readonly_inputs,\n ContainerType.crashes,\n ContainerType.inputs,\n ContainerType.reports,\n ContainerType.unique_reports,\n )\n helper.create_containers()\n helper.setup_notifications(notification_config)\n helper.upload_setup(args.setup_dir, args.target_exe)\n if args.inputs:\n helper.upload_inputs(args.inputs)\n\n add_setup_script(of, helper.container_name(ContainerType.setup))\n\n containers = [\n (ContainerType.setup, helper.container_name(ContainerType.setup)),\n (ContainerType.crashes, helper.container_name(ContainerType.crashes)),\n (ContainerType.reports, helper.container_name(ContainerType.reports)),\n (\n ContainerType.unique_reports,\n helper.container_name(ContainerType.unique_reports),\n ),\n ]\n\n of.logger.info(\"Creating generic_crash_report task\")\n\n job = helper.create_job()\n of.tasks.create(\n job.job_id,\n TaskType.generic_crash_report,\n helper.setup_relative_blob_name(args.target_exe, args.setup_dir),\n containers,\n pool_name=args.pool_name,\n duration=args.duration,\n )\n\n containers = [\n (ContainerType.tools, Container(\"honggfuzz\")),\n (ContainerType.setup, helper.container_name(ContainerType.setup)),\n (ContainerType.crashes, helper.container_name(ContainerType.crashes)),\n (\n ContainerType.inputs,\n helper.container_name(ContainerType.inputs),\n ),\n ]\n\n supervisor_options = [\n \"-n1\",\n \"--crashdir\",\n \"{crashes}\",\n \"-u\",\n \"-i\",\n \"{input_corpus}\",\n \"--\",\n \"{target_exe}\",\n \"{input}\",\n ]\n\n of.tasks.create(\n job.job_id,\n TaskType.generic_supervisor,\n helper.setup_relative_blob_name(args.target_exe, args.setup_dir),\n containers,\n pool_name=args.pool_name,\n supervisor_exe=\"/onefuzz/honggfuzz/honggfuzz\",\n supervisor_options=supervisor_options,\n supervisor_input_marker=\"___FILE___\",\n duration=args.duration,\n vm_count=1,\n tags=helper.tags,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"microsoft/onefuzz","sub_path":"src/cli/examples/honggfuzz.py","file_name":"honggfuzz.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","stars":2783,"dataset":"github-code","pt":"96"} +{"seq_id":"32768027720","text":"import csv\nimport os\nimport re\nimport sys\nfrom pathlib import Path\n\ncurrent_dir = os.path.split(os.path.abspath(__file__))[0]\nmain_dir = current_dir[:current_dir.rfind(\"\\\\\") + 1].split(\"\\\\\")\nmain_dir = ''.join(stuff + \"\\\\\" for stuff in main_dir[:-2]) # one folder further back\nsys.path.insert(1, main_dir)\ndata_dir = os.path.join(main_dir, \"data\")\n\nmodule_name = \"historical\"\n\nanimation_folder = Path(os.path.join(data_dir, \"animation\"))\nsub1_directories 
= [os.path.normpath(x).split(os.sep)[-1] for x in animation_folder.iterdir() if not x.is_dir()]\n\n\ndef natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)\n\n\ndef sort_animation(pool, header):\n name_list = list(set(pool.keys()))\n name_list = natural_sort(name_list)\n print(name_list)\n\n animation_pool = {}\n for new_stuff in name_list:\n try:\n animation_pool[new_stuff] = pool[new_stuff]\n except KeyError:\n pass\n new_pool = animation_pool\n\n with open(os.path.join(animation_folder, animation_race), mode=\"w\",\n encoding='utf-8',\n newline=\"\") as edit_file:\n filewriter = csv.writer(edit_file, delimiter=\",\", quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n save_list = new_pool\n final_save = [[item for item in header]]\n for item in list(save_list.items()):\n final_save.append([item[0]] + item[1])\n for row in final_save:\n filewriter.writerow(row)\n edit_file.close()\n\n\nfor animation_race in sub1_directories:\n if animation_race not in (\"readme.md\", \"template.csv\"):\n with open(os.path.join(animation_folder, animation_race), encoding=\"utf-8\",\n mode=\"r\") as edit_file:\n rd = csv.reader(edit_file, quoting=csv.QUOTE_MINIMAL)\n rd = [row for row in rd]\n part_name_header = rd[0]\n animation_pool = {}\n for row_index, row in enumerate(rd):\n if row_index > 0:\n key = row[0]\n row = row[1:]\n animation_pool[key] = [item for item_index, item in enumerate(row)]\n pool = animation_pool\n edit_file.close()\n\n sort_animation(pool, part_name_header)\n\n","repo_name":"remance/Royal-Ordains","sub_path":"animation-maker/script/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"15101003592","text":"from subprocess import Popen\nimport argparse\ndb = 'rgb(250, 187, 25)'\ndf = 'rgb(255, 218, 114)'\ndef genIcon(args):\n color1 = args.ffill\n color2 = args.bfill\n mainfile = args.clipart\n outname = args.name\n \n cmd1 = ['convert','front.png','-fill',f\"{color1}\",\"-colorize\",\"100\",\"front1.png\"]\n cmd2 = ['convert','back.png','-fill',f\"{color2}\",'-colorize',\"100\",\"back1.png\"]\n overlaycmd = 'convert front1.png back1.png -gravity center -composite merge1.png'.split()\n copier = f'convert {mainfile} out.png'.split()\n trimmer = f'convert out.png -trim out.png'.split()\n resizer = f'convert out.png -resize 160x160 out.png'.split()\n extent = f'convert out.png -background none -gravity center -extent 256x256 out.png'.split()\n \n overlaycmd2 = f'convert merge1.png out.png -gravity center -geometry -0+10 -composite {outname}'.split()\n cleaner = 'rm front1.png back1.png out.png merge1.png'.split()\n for cmd in [cmd1,cmd2,overlaycmd]:\n Popen(cmd).wait()\n if(len(mainfile)>0):\n Popen(copier).wait()\n if(args.trim):\n Popen(trimmer).wait()\n Popen(resizer).wait()\n # Popen(extent).wait()\n Popen(overlaycmd2).wait()\n else:\n Popen(f'convert merge1.png {outname}'.split())\n Popen(cleaner).wait()\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--ffill',help=\"front fill\",type=str,default=df)\n parser.add_argument('--bfill',help=\"back fill\",type=str,default=db)\n parser.add_argument('--clipart',help='Clipart put on front',type=str,default='')\n parser.add_argument('--trim',help=\"Trim the transparent padding around the clipart\",action='store_true')\n 
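# natural_sort above splits names on digit runs so numeric parts compare as
# integers ('frame2' before 'frame10'). A standalone check of that behaviour:
import re

def natural_key(s):
    return [int(t) if t.isdigit() else t.lower() for t in re.split(r'([0-9]+)', s)]

names = ['frame10.png', 'frame2.png', 'frame1.png']
assert sorted(names, key=natural_key) == ['frame1.png', 'frame2.png', 'frame10.png']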
parser.add_argument('--name',help=\"Name of output file\",type=str,default='final.ico')\n args = parser.parse_args()\n genIcon(args)","repo_name":"hardik-rajpal/folderIconGen","sub_path":"icon.py","file_name":"icon.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36668503517","text":"#!/usr/bin/env python3.8\n# https://adventofcode.com/2021/day/14\n\n\nfrom collections import Counter, defaultdict\nfrom sys import argv\n\n\ndef parse(input):\n polymer = [c for c in input[0]]\n rules = {x[0]: x[1] for x in [x.split(' -> ') for x in input[2:]]}\n # print(f'{polymer=} {rules=}')\n return polymer, rules\n\n\ndef q1(input):\n polymer, rules = parse(input)\n for i in range(10):\n inserts = 0\n for x in range(len(polymer)):\n start = x + inserts\n pair = ''.join(polymer[start:start+2])\n if pair in rules:\n insert = rules[pair]\n # print(f' insert {pair}->{insert} @ {start} ({x}+{inserts})')\n polymer.insert(start+1, insert)\n inserts += 1\n # print(f'{i}: {\"\".join(polymer)} ({len(polymer)})')\n c = Counter(polymer).most_common()\n return c[0][1] - c[-1][1]\n\n\ndef q2(input):\n polymer, rules = parse(input)\n pairs = defaultdict(int)\n for i in range(len(polymer)-1):\n pairs[''.join(polymer[i:i+2])] += 1\n\n for i in range(40):\n # print(f' {i=} {pairs}')\n newpairs = defaultdict(int)\n for pair, count in pairs.items():\n if pair in rules:\n insert = rules[pair]\n newpairs[pair[0] + insert] += count\n newpairs[insert + pair[1]] += count\n else:\n newpairs[pair] += count\n pairs = newpairs\n\n element_counts = defaultdict(int)\n for pair, count in pairs.items():\n element_counts[pair[0]] += count\n element_counts[pair[1]] += count\n element_counts[polymer[0]] += 1 # start isn't double-counted, make it so\n element_counts[polymer[-1]] += 1 # end isn't double-counted, make it so\n ordered = sorted([int(n/2) for n in element_counts.values()]) # un-double-count all elements\n # print(f'{element_counts=}, {ordered=}')\n return ordered[-1] - ordered[0]\n\n\nif len(argv) > 1:\n with open(argv[1], 'r') as f:\n print(q2([l.strip() for l in f]))\nelse:\n print(q2([l.strip() for l in '''\nNNCB\n\nCH -> B\nHH -> N\nCB -> H\nNH -> C\nHB -> C\nHC -> B\nHN -> C\nNN -> C\nBH -> H\nNC -> B\nNB -> B\nBN -> B\nBB -> N\nBC -> B\nCC -> N\nCN -> C\n'''.splitlines()[1:]]))\n","repo_name":"wheresbarney/Advent-of-Code-2021","sub_path":"day14/aoc.py","file_name":"aoc.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"33031927579","text":"\"\"\"Testing DDD17 utility.\n\nAuthor: Yuhuang Hu\nEmail : duguyue100@gmail.com\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport Queue\nimport time\nfrom copy import deepcopy\n\nimport numpy as np\n\nimport spiker\nfrom spiker import data\nfrom spiker.data import ddd17\nfrom spiker import log\n\n# load data\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"highway-down-2.hdf5\")\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"highway-down-1.hdf5\")\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"highway-up-1.hdf5\")\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"highway-up-2.hdf5\")\n\n# CVPR data\n# Night 1\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"jul09/rec1499656391.hdf5\")\n# Night 2\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# 
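# q2 of the day-14 polymer solution above scales to 40 steps by tracking
# counts of adjacent pairs instead of the exponentially growing polymer. One
# hand-checked step on the AoC example 'NNCB' (which becomes 'NCNBCHB'):
from collections import defaultdict

pairs = {'NN': 1, 'NC': 1, 'CB': 1}  # adjacent pairs of 'NNCB'
rules = {'NN': 'C', 'NC': 'B', 'CB': 'H'}

nxt = defaultdict(int)
for pair, count in pairs.items():
    ins = rules[pair]
    nxt[pair[0] + ins] += count  # left half of the split pair
    nxt[ins + pair[1]] += count  # right half of the split pair

assert dict(nxt) == {'NC': 1, 'CN': 1, 'NB': 1, 'BC': 1, 'CH': 1, 'HB': 1}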
\"jul09/rec1499657850.hdf5\")\n# Night 3\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug01/rec1501649676.hdf5\")\n# Night 4\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug01/rec1501650719.hdf5\")\n# Night 5\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug05/rec1501994881.hdf5\")\n# Night 6\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug09/rec1502336427.hdf5\")\n# Night 7\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug09/rec1502337436.hdf5\")\n# Day 1\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"jul16/rec1500220388.hdf5\")\n# Day 2\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"jul18/rec1500383971.hdf5\")\n# Day 3\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"jul18/rec1500402142.hdf5\")\n# Day 4\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"jul28/rec1501288723.hdf5\")\n# Day 5\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"jul29/rec1501349894.hdf5\")\n# Day 6\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug01/rec1501614399.hdf5\")\n# Day 7\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug02/rec1501705481.hdf5\")\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug13/rec1502667889.hdf5\")\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"jul20/rec1500552390.hdf5\")\nfile_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n \"aug08/rec1502241196.hdf5\")\n# Day 8\n# file_name = os.path.join(spiker.SPIKER_DATA, \"ddd17\",\n# \"aug15/rec1502825681.hdf5\")\n\nbinsize = 0.1\nfixed_dt = binsize > 0\nclip_value = 8\n\nf_in = ddd17.HDF5Stream(file_name, ddd17.EXPORT_DATA_VI.union({'dvs'}))\nmerged = ddd17.MergedStream(f_in)\n\ntime_start = merged.tmin+1e6*0\ntime_stop = merged.tmax\n\nprint(time_start)\nprint(time_stop)\nprint((time_stop-time_start)/1e6/60)\n\n# find the start point\nmerged.search(time_start)\n\n# collect all time\ndtypes = {k: float for k in ddd17.EXPORT_DATA.union({'timestamp'})}\n\n# exporting both aps and dvs\ndtypes['aps_frame'] = (np.uint8, data.DAVIS346_SHAPE)\ndtypes['dvs_frame'] = (np.int16, data.DAVIS346_SHAPE)\n\noutfile = file_name[:-5] + '-export.hdf5'\n\nf_out = ddd17.HDF5(outfile, dtypes, mode='w',\n chunksize=8, compression='gzip')\n\n# current sample\ncurrent_row = {k: 0 for k in dtypes}\ncurrent_row['aps_frame'] = np.zeros(data.DAVIS346_SHAPE, dtype=np.uint8)\ncurrent_row['dvs_frame'] = np.zeros(data.DAVIS346_SHAPE, dtype=np.int16)\n\nsys_ts, t_pre, t_offset, ev_count, pbar_next = 0, 0, 0, 0, 0\n\nwhile merged.has_data and sys_ts <= time_stop*1e-6:\n try:\n sys_ts, d = merged.get()\n except Queue.Empty:\n # wait for queue to fill up\n time.sleep(0.01)\n continue\n if not d:\n # skip unused data\n continue\n if d['etype'] == 'special_event':\n ddd17.unpack_data(d)\n if any(d['data'] == 0):\n # this is a timestamp reset\n print('ts reset detected, setting offset',\n current_row['timestamp'])\n t_offset += current_row['timestamp']\n # NOTE the timestamp of this special event is not meaningful\n continue\n if d['etype'] in ddd17.EXPORT_DATA_VI:\n current_row[d['etype']] = d['data']\n continue\n if t_pre == 0 and d['etype'] in ['frame_event', 'polarity_event']:\n print('resetting t_pre (first %s)' % d['etype'])\n t_pre = d['timestamp'] + t_offset\n if d['etype'] == 'frame_event':\n if fixed_dt:\n while t_pre + binsize < d['timestamp'] + t_offset:\n # aps frame is not in current bin -> save and proceed\n 
f_out.save(deepcopy(current_row))\n current_row['dvs_frame'][:, :] = 0\n current_row['timestamp'] = t_pre\n t_pre += binsize\n else:\n current_row['timestamp'] = d['timestamp'] + t_offset\n current_row['aps_frame'] = ddd17.filter_frame(ddd17.unpack_data(d))\n # current_row['timestamp'] = t_pre\n # JB: I don't see why the previous line should make sense\n continue\n if d['etype'] == 'polarity_event':\n ddd17.unpack_data(d)\n times = d['data'][:, 0] * 1e-6 + t_offset\n num_evts = d['data'].shape[0]\n print (\"Number of events in this frame: %d\"\n % (num_evts))\n offset = 0\n if fixed_dt:\n # fixed time interval bin mode\n num_samples = int(np.ceil((times[-1] - t_pre) / binsize))\n for _ in xrange(num_samples):\n # take n events\n n = (times[offset:] < t_pre + binsize).sum()\n sel = slice(offset, offset + n)\n current_row['dvs_frame'] += \\\n ddd17.raster_evts(d['data'][sel], clip_value=1)\n # current_row['dvs_frame'] += \\\n # ddd17.raster_evts_new(d['data'][sel])\n offset += n\n # save if we're in the middle of a packet, otherwise\n # wait for more data\n if sel.stop < num_evts:\n current_row['timestamp'] = t_pre\n f_out.save(deepcopy(current_row))\n current_row['dvs_frame'][:, :] = 0\n t_pre += binsize\n np.clip(current_row['dvs_frame'], -clip_value, clip_value,\n out=current_row['dvs_frame'])\n else:\n # fixed event count mode\n num_samples = np.ceil(-float(num_evts + ev_count)/binsize)\n for _ in xrange(int(num_samples)):\n n = min(int(-binsize - ev_count), num_evts - offset)\n sel = slice(offset, offset + n)\n current_row['dvs_frame'] += ddd17.raster_evts(d['data'][sel])\n if sel.stop > sel.start:\n current_row['timestamp'] = times[sel].mean()\n offset += n\n ev_count += n\n if ev_count == -binsize:\n f_out.save(deepcopy(current_row))\n current_row['dvs_frame'][:, :] = 0\n ev_count = 0\n np.clip(current_row['dvs_frame'], -clip_value, clip_value,\n out=current_row['dvs_frame'])\n\nprint('[DEBUG] sys_ts/time_stop', sys_ts, time_stop*1e-6)\nmerged.exit.set()\nf_out.exit.set()\nf_out.join()\nprint('[DEBUG] output done')\nwhile not merged.done.is_set():\n print('[DEBUG] waiting for merger')\n time.sleep(1)\nprint('[DEBUG] merger done')\nf_in.join()\nprint('[DEBUG] stream joined')\nmerged.join()\nprint('[DEBUG] merger joined')\nfilesize = os.path.getsize(outfile)\nprint('Finished. 
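# The ddd17 export loop above accumulates DVS events into one frame per time
# bin and clips the counts to +/- clip_value. A toy version of that
# accumulate-and-clip step with illustrative events:
import numpy as np

frame = np.zeros((4, 4), dtype=np.int16)
events = [(1, 1, +1)] * 12 + [(2, 3, -1)] * 3  # (row, col, polarity)
for r, c, pol in events:
    frame[r, c] += pol
np.clip(frame, -8, 8, out=frame)               # same clip_value=8 as above
assert frame[1, 1] == 8 and frame[2, 3] == -3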
Wrote {:.1f}MiB to {}.'.format(filesize/1024**2, outfile))\n\ntime.sleep(1)\nos._exit(0)\n","repo_name":"duguyue100/spiker","sub_path":"spiker/scripts/ddd17_test.py","file_name":"ddd17_test.py","file_ext":"py","file_size_in_byte":8047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"37119414769","text":"#!/usr/bin/env python\n\nimport sys\n\n\nif __name__ == '__main__':\n N, K = list(map(int, sys.stdin.readline().split()))\n c = sorted(map(int, sys.stdin.readline().split()))\n \n total = 0\n purchased = 0\n friends = [0] * K\n \n while len(c) > 0:\n total += (friends[purchased % len(friends)] + 1) * c.pop()\n friends[purchased % len(friends)] += 1\n purchased += 1\n \n print(total)\n","repo_name":"ssmehta/programming-challanges","sub_path":"hackerrank/algorithms/flowers/flowers.py","file_name":"flowers.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"96"} +{"seq_id":"14639294586","text":"from PIL import Image, ImageDraw\r\n\r\n\r\ndef triangle(width, height, color):\r\n im = Image.new(\"RGB\", (width, height))\r\n drawer = ImageDraw.Draw(im)\r\n\r\n drawer.polygon([(int(width * 0.5), int(height * 0.2)),\r\n (width * 0.9, height * 0.9), (width * 0.1, height * 0.9)],\r\n color)\r\n\r\n im.save('triangle.png')\r\n\r\n\r\ntriangle(100, 100, '#F443AF')","repo_name":"NejdanX/YandexLyceum","sub_path":"YandexLyceum/Первый год обучения/4 блок (библиотеки)/2) Работа с граф. файлами/Урок.py","file_name":"Урок.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"70825586555","text":"import torch\nimport torch.nn as nn\nfrom modules.res_block import ResBlock1d\n\nclass Gate(nn.Module):\n def __init__(self, dim_in, n_experts, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n block = [nn.Conv1d(dim_in, dim_in, kernel_size=5, padding=2, stride=1, padding_mode=\"reflect\"), \n nn.BatchNorm1d(dim_in), \n nn.LeakyReLU(0.1, True)]\n for k in range(3):\n block += [ResBlock1d(dim_in, dilation=3)]\n \n block += [nn.Conv1d(dim_in, n_experts, kernel_size=3, padding=1, stride=1, padding_mode=\"reflect\"),\n nn.BatchNorm1d(n_experts), \n nn.LeakyReLU(0.1, True)]\n \n for k in range(3):\n block += [ResBlock1d(n_experts, dilation=3)]\n self.block = nn.Sequential(*block)\n \n def forward(self, x):\n y = self.block(x)\n return y","repo_name":"avi33/audio-compression","sub_path":"modules/gate.py","file_name":"gate.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"201623477","text":"class Solution:\n def myAtoi(self, s: str) -> int:\n res = 0\n int_max = pow(2,31)-1\n int_min = -pow(2,31)\n index = 0\n sign = 1\n n = len(s)\n\n while index<n and s[index] == \" \":\n index +=1\n \n if index < n and s[index] == \"+\":\n index +=1\n sign = 1\n elif index < n and s[index] == '-':\n index +=1\n sign = -1\n \n while index < n and s[index].isdigit():\n digit = ord(s[index])-ord('0')\n \n if (res > int_max//10) or (res == int_max//10 and digit > int_max % 10):\n return int_max if sign == 1 else int_min\n res = res*10 + digit\n index +=1\n return 
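# flowers.py above sorts prices ascending and pops from the end, so the
# higher round-robin multipliers (friends[p % K] + 1, i.e. p // K + 1) land
# on the cheapest flowers. A worked toy instance with k=2 friends:
prices = sorted([2, 5, 6])
k, total, bought = 2, 0, 0
while prices:
    total += (bought // k + 1) * prices.pop()  # pop() removes the current max
    bought += 1
assert total == 6 + 5 + 2 * 2  # 6 and 5 at 1x, the cheap 2 at 2x -> 15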
res*sign","repo_name":"aryan1107/LeetcodeGrind","sub_path":"8-string-to-integer-atoi/8-string-to-integer-atoi.py","file_name":"8-string-to-integer-atoi.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"27209565059","text":"from freud import Freud\nimport mysql.connector\nfrom words import Words\n\n# Analyze all new dream content\nf = Freud()\n\ncnx = mysql.connector.connect(user='root', password='password', database='freud')\n# Get and process dreams\ncursor = cnx.cursor(buffered=True)\n\ndream_query = (\"\"\"\n select \n bin_to_uuid(id) as 'id',\n concat(title, '. ', description) as 'text'\n from\n dj.dream\n where\n not exists(\n select\n 1\n from\n freud.dream_word_freq dwf\n where\n dwf.dream_id = dream.id\n limit\n 1\n )\n ;\n \"\"\")\n\ncursor.execute(dream_query)\n\nif cursor.rowcount > 0:\n f = Freud()\n words = Words()\n for (id, text) in cursor:\n text = f.preprocess_dream_text(text)\n dream_word_freq = f.process_dream(id, text)\n for dwf in dream_word_freq:\n words.add_dream_frequency(dwf[0], dwf[1], dwf[2], dwf[3])\nelse:\n print('No dreams to process.')","repo_name":"sheldonjuncker/dj-yii","sub_path":"da/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41170008743","text":"from lowdown.core.authors.models import Author\nfrom .serializers import AuthorSerializer\nfrom rest_framework import generics\n\n\nclass ListAuthors(generics.ListAPIView):\n serializer_class = AuthorSerializer\n search_fields = ('name', )\n\n def get_queryset(self):\n queryset = Author.objects.all()\n\n if 'vertical' in self.kwargs:\n queryset = queryset.filter(vertical=self.kwargs['vertical'])\n return queryset\n","repo_name":"brudil/lowdown","sub_path":"lowdown/manage/authors/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"43273812014","text":"# -----Akhbaar Padhke Sunaao-----\nimport requests\nimport json\nfrom win32com.client import Dispatch\n\n\ndef speak(str):\n tell = Dispatch(\"SAPI.SpVoice\")\n tell.Speak(str)\n\n\nif __name__ == '__main__':\n url = 'https://newsapi.org/v2/top-headlines?country=in&apiKey=94a53d42a2d64fda92c90d8008922880'\n news = requests.get(url)\n news_dict = json.loads(news.content)\n arts = news_dict['articles']\n speak(\"News for today.. Let's begin\")\n i = 1\n for article in arts:\n print(article['title'])\n speak(f\"News number {i}: {article['title']}\")\n i += 1\n speak(\"Thanks for listening.\")\n","repo_name":"NnIiKk-prasad/Language-Learning","sub_path":"Python/2. 
Exercise/09-News_Reading/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7631188274","text":"import logging\nimport weakref\nfrom typing import Tuple, Union\nfrom pathlib import Path\nimport open3d as o3d\nimport numpy as np\n\ntry:\n import raybender as rb\n import raybender.utils as rbutils\nexcept ImportError as error:\n logging.getLogger(__name__).error(\n 'Could not find raybender; install it from https://github.com/cvg/raybender.')\n raise error\n\nfrom ..capture import Pose, Camera\nfrom ..utils.geometry import to_homogeneous\nfrom ..utils.io import read_mesh\n\n\ndef compute_rays(T_cam2w: Pose, camera: Camera, stride: int = 1):\n w, h = np.array((camera.width, camera.height)) // stride\n center = T_cam2w.t\n origins = np.tile(center.astype(np.float32)[None], (h*w, 1))\n\n p2d = np.mgrid[:h, :w].reshape(2, -1)[::-1].T.astype(np.float32)\n p2d += 0.5 # to COLMAP coordinates\n if stride != 1:\n p2d *= stride\n p2d_norm = camera.image2world(p2d)\n R = T_cam2w.R\n # It is much faster to perform the transformation in fp32\n directions = to_homogeneous(p2d_norm.astype(np.float32)) @ R.astype(np.float32).T\n # directions = to_homogeneous(p2d_norm) @ R.T\n\n # Outputs must be contiguous.\n origins = np.ascontiguousarray(origins, dtype=np.float32)\n directions = np.ascontiguousarray(directions, dtype=np.float32)\n return origins, directions\n\n\nclass Renderer:\n def __init__(self, mesh: Union[Path, o3d.geometry.TriangleMesh]):\n if isinstance(mesh, Path):\n mesh = read_mesh(mesh)\n assert isinstance(mesh, o3d.geometry.TriangleMesh)\n\n vertices = np.asarray(mesh.vertices).astype(np.float32)\n if mesh.has_vertex_colors():\n vertex_colors = np.asarray(mesh.vertex_colors).astype(np.float32)\n else:\n vertex_colors = np.random.rand(vertices.shape[0], 3).astype(np.float32)\n triangles = np.asarray(mesh.triangles).astype(np.int32)\n del mesh\n\n self.scene = rb.create_scene()\n rb.add_triangle_mesh(self.scene, vertices, triangles)\n self.geom = (triangles, vertices, vertex_colors)\n\n # automatically cleanup the object if release is not called explicitly\n self._finalizer = weakref.finalize(self, self._release, self.scene)\n\n def render_from_capture(self, T_cam2w: Pose, camera: Camera, with_normals: bool = False,\n ) -> Tuple[np.ndarray]:\n T_w2cam = T_cam2w.inverse()\n ray_origins, ray_directions = compute_rays(T_cam2w, camera)\n\n geom_ids, bcoords = rb.ray_scene_intersection(\n self.scene, ray_origins, ray_directions)\n *_, tri_ids, bcoords, valid = rbutils.filter_intersections(geom_ids, bcoords)\n\n rgb, depth = rbutils.interpolate_rgbd_from_geometry(\n *self.geom, tri_ids, bcoords, valid,\n T_w2cam.R, T_w2cam.t, camera.width, camera.height)\n if not with_normals:\n return rgb, depth\n\n tris = self.geom[0][tri_ids]\n v = self.geom[1]\n normals = np.cross(v[tris[:, 0]] - v[tris[:, 1]], v[tris[:, 0]] - v[tris[:, 2]])\n normals /= np.linalg.norm(normals, axis=1, keepdims=True).clip(min=1e-7)\n inverted = np.einsum('nd,nd->n', normals, ray_directions[valid]) < 0\n normals[inverted] *= -1\n normals = normals @ T_cam2w.inv.R.T\n normal_map = np.zeros((*depth.shape, 3))\n normal_map[valid.reshape(*depth.shape)] = normals\n\n return rgb, depth, normal_map\n\n\n def compute_intersections(self, rays: Tuple) -> Tuple:\n origins, directions = rays\n origins = np.ascontiguousarray(origins, dtype=np.float32)\n directions = np.ascontiguousarray(directions, 
dtype=np.float32)\n\n geom_ids, bcoords = rb.ray_scene_intersection(self.scene, origins, directions)\n *_, tri_ids, bcoords, valid = rbutils.filter_intersections(geom_ids, bcoords)\n locations = rb.barycentric_interpolator(tri_ids, bcoords, *self.geom[:2])\n return locations, valid\n\n @staticmethod\n def _release(scene):\n if scene is not None:\n rb.release_scene(scene)\n\n def release(self):\n if self.scene is None:\n raise ValueError('Cannot release twice, create a new object.')\n self._release(self.scene)\n self.scene = None\n","repo_name":"microsoft/lamar-benchmark","sub_path":"scantools/proc/rendering.py","file_name":"rendering.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":331,"dataset":"github-code","pt":"96"} +{"seq_id":"11102899534","text":"#!/usr/bin/env python\n\"\"\"Get 4Runner vehicles from Toyota's inventory.\"\"\"\nimport datetime\nimport json\nimport uuid\n\nimport pandas as pd\nimport requests\n\n# Set this to True to query local data instead of Toyota's API.\nQUERY_LOCAL_DATA = True\n\nGRAPHQL_QUERY = \"\"\"query {\n locateVehiclesByZip(\n zipCode: \"78729\"\n brand: \"TOYOTA\"\n pageNo: %d\n pageSize: 250\n seriesCodes: \"4runner\"\n distance: 20000\n leadid: \"%s\"\n ) {\n pagination {\n pageNo\n pageSize\n totalPages\n totalRecords\n }\n vehicleSummary {\n vin\n stockNum\n brand\n marketingSeries\n year\n isTempVin\n dealerCd\n dealerCategory\n distributorCd\n holdStatus\n weightRating\n isPreSold\n dealerMarketingName\n dealerWebsite\n isSmartPath\n distance\n isUnlockPriceDealer\n transmission {\n transmissionType\n }\n price {\n advertizedPrice\n nonSpAdvertizedPrice\n totalMsrp\n sellingPrice\n dph\n dioTotalMsrp\n dioTotalDealerSellingPrice\n dealerCashApplied\n baseMsrp\n }\n options {\n optionCd\n marketingName\n optionType\n dealerSellingPrice\n msrp\n }\n model {\n modelCd\n marketingName\n marketingTitle\n }\n intColor {\n marketingName\n }\n extColor {\n marketingName\n }\n eta {\n currFromDate\n currToDate\n }\n engine {\n engineCd\n name\n }\n drivetrain {\n code\n title\n }\n family\n cab {\n code\n title\n }\n bed {\n code\n title\n }\n }\n }\n}\n\"\"\"\n\n\ndef query_toyota(page_number):\n \"\"\"Query Toyota for a page of vehicles.\"\"\"\n print(f\"Getting page {page_number}\")\n url = \"https://api.search-inventory.toyota.com/graphql\"\n headers = {\n \"referrer\": \"https://www.toyota.com/\",\n \"user-agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0\",\n \"accept\": \"*/*\",\n }\n json_post = {\"query\": GRAPHQL_QUERY % (page_number, str(uuid.uuid4()))}\n resp = requests.post(url, json=json_post, headers=headers)\n\n return resp.json()[\"data\"][\"locateVehiclesByZip\"][\"vehicleSummary\"]\n\n\ndef query_local_data():\n \"\"\"Load previously fetched vehicle data from the local JSON file.\"\"\"\n with open(\"4runners_raw.json\", \"r\") as f:\n return pd.json_normalize(json.load(f))\n\n\ndef get_all_vehicles():\n \"\"\"Get all vehicles from Toyota.\"\"\"\n df = pd.DataFrame()\n page_number = 1\n while True:\n try:\n vehicles = query_toyota(page_number)\n except Exception as exc:\n print(f\"Error: {exc}\")\n break\n\n if vehicles:\n df = pd.concat([df, pd.json_normalize(vehicles)])\n page_number += 1\n continue\n\n break\n\n return df\n\n\ndef add_dealer_state(df):\n \"\"\"Add the dealer's state to the data.\"\"\"\n dealers = pd.read_csv(\"dealers.csv\")[[\"dealerId\", \"state\"]]\n dealers.rename(columns={\"state\": \"Dealer State\"}, inplace=True)\n\n # Convert dealerCd to an integer.\n 
df[\"dealerCd\"] = df[\"dealerCd\"].apply(pd.to_numeric)\n\n return df.merge(dealers, left_on=\"dealerCd\", right_on=\"dealerId\")\n\n\ndef cleanup_columns(df):\n \"\"\"Replace data in columns.\"\"\"\n # Some colors have an extra cost tag. Remove it.\n df[\"Color\"] = df[\"Color\"].str.replace(\" [extra_cost_color]\", \"\", regex=False)\n\n # Calculate the dealer's final price.\n df[\"Dealer Price\"] = df[\"Base MSRP\"] + df[\"price.dioTotalDealerSellingPrice\"]\n\n # Fill any blanks with zeroes.\n df[\"Dealer Price\"] = df[\"Dealer Price\"].fillna(df[\"Base MSRP\"])\n\n df.drop(columns=[\"price.dioTotalDealerSellingPrice\"], inplace=True)\n\n return df\n\n\ndef cleanup_models(df):\n \"\"\"Replace model names with shorter versions.\"\"\"\n model_key = \"Model\"\n df[model_key] = df[model_key].str.replace(\"4Runner \", \"\")\n df[model_key] = df[model_key].str.replace(\"4Runner \", \"\")\n df[model_key] = df[model_key].str.replace(\n \"Anniversary Special Edition\", \"Anniversary\"\n )\n df[model_key] = df[model_key].str.replace(\"Off-Road Premium\", \"ORP\")\n\n return df\n\n\ndef remove_invalid_rows(df):\n \"\"\"Remove rows with invalid data.\"\"\"\n df = df[df[\"Color\"].notna()]\n\n # Remove any old models that might still be there.\n last_year = datetime.date.today().year - 1\n df.drop(df[df[\"Year\"] < last_year].index, inplace=True)\n\n return df\n\n\ndef translate_status(df):\n \"\"\"Translate the vehicle shipping status into useful values.\"\"\"\n statuses = {\n \"A\": \"Factory to port\",\n \"F\": \"Port to dealer\",\n \"G\": \"At dealer\",\n }\n return df.replace({\"Shipping Status\": statuses})\n\n\ndef translate_presold(df):\n \"\"\"Translate the vehicle presold status into useful values.\"\"\"\n statuses = {\n None: \"No\",\n False: \"No\",\n True: \"Yes\",\n }\n return df.replace({\"Pre-Sold\": statuses})\n\n\ndef make_view(df):\n \"\"\"Create a view of the dataframe with the columns we care about.\"\"\"\n df = df[\n [\n \"vin\",\n \"dealerCategory\",\n \"price.baseMsrp\",\n \"price.dioTotalDealerSellingPrice\",\n \"isPreSold\",\n \"holdStatus\",\n \"year\",\n \"drivetrain.code\",\n \"model.marketingName\",\n \"extColor.marketingName\",\n \"dealerMarketingName\",\n \"dealerWebsite\",\n \"Dealer State\",\n ]\n ].copy()\n return df.rename(\n columns={\n \"vin\": \"VIN\",\n \"price.baseMsrp\": \"Base MSRP\",\n \"model.marketingName\": \"Model\",\n \"extColor.marketingName\": \"Color\",\n \"dealerCategory\": \"Shipping Status\",\n \"dealerMarketingName\": \"Dealer\",\n \"dealerWebsite\": \"Dealer Website\",\n \"isPreSold\": \"Pre-Sold\",\n \"holdStatus\": \"Hold Status\",\n \"year\": \"Year\",\n \"drivetrain.code\": \"Drivetrain\",\n }\n )\n\n\ndef main():\n \"\"\"You might call this the main function. 
You would be right.\"\"\"\n if QUERY_LOCAL_DATA:\n raw_df = query_local_data()\n else:\n raw_df = get_all_vehicles()\n\n # Add a column for dealer state.\n df = add_dealer_state(raw_df)\n\n # Create a view with properly named columns.\n df = make_view(df)\n\n # Data cleanup.\n df = cleanup_columns(df)\n df = cleanup_models(df)\n\n # Translate values.\n df = translate_status(df)\n df = translate_presold(df)\n\n # Remove any invalid rows.\n df = remove_invalid_rows(df)\n\n # Get column ordering just right.\n df = df.reindex(\n columns=[\n \"VIN\",\n \"Model\",\n \"Year\",\n \"Drivetrain\",\n \"Color\",\n \"Base MSRP\",\n \"Dealer Price\",\n \"Shipping Status\",\n \"Hold Status\",\n \"Pre-Sold\",\n \"Dealer\",\n \"Dealer State\",\n \"Dealer Website\",\n ]\n )\n\n # Sort by VIN to avoid lots of repo churn.\n df = df.sort_values([\"VIN\"], ascending=[True])\n raw_df = raw_df.sort_values([\"vin\"], ascending=[True])\n\n # Print a summary; writing the markdown file is currently disabled.\n df.info()\n # df.to_markdown(\"vehicles.md\", index=False)\n\n # Write JSON files, too.\n df.to_json(\"4runners.json\", orient=\"records\", indent=2)\n raw_df.to_json(\"4runners_raw.json\", orient=\"records\", indent=2)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"major/toyota-inventory","sub_path":"toyota_inventory/get_4runners.py","file_name":"get_4runners.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"75034903675","text":"from django.http import HttpResponse\nfrom django.template.response import TemplateResponse\n\n\ndef studenthomepage(request, homepage=\"studenthomepage.html\"):\n context = {}\n context[\"homepage_url\"] = 'studenthomepage'\n return TemplateResponse(request, homepage, context)\n\n\ndef parenthomepage(request):\n context = {}\n context[\"homepage_url\"] = 'parenthomepage'\n homepage = \"parenthomepage.html\"\n return TemplateResponse(request, homepage, context)\n\n\ndef teacherhomepage(request):\n context = {}\n context[\"homepage_url\"] = 'teacherhomepage'\n homepage = \"teacherhomepage.html\"\n return TemplateResponse(request, homepage, context)\n\n\ndef loginpage(request):\n return HttpResponse('loginpage')\n\n","repo_name":"ethanwcit/Dragonfly","sub_path":"Django/Dragonfly/Dragonfly/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"22504330083","text":"import pandas as pd\nfrom src.coordenadas import *\n\n\ndef cleaning(collection):\n\n filter = {\"$and\": [{\"deadpooled_year\": {\"$eq\": None}},\n {\"founded_year\": {\"$ne\": None}}]}\n\n projection = {\"name\": 1, \"offices\": 1, \"category_code\": 1,\n \"founded_year\": 1, \"total_money_raised\": 1}\n\n df = pd.DataFrame(list(collection.find(\n filter=filter, projection=projection)))\n\n df = df.explode('offices')\n\n dfOfficeData = df[[\"offices\"]].apply(\n lambda r: r.offices, result_type=\"expand\", axis=1)\n\n cleanData = pd.concat([df, dfOfficeData], axis=1)\n\n cleanData = cleanData.drop(columns=[\"_id\", \"offices\"])\n\n cleanData[\"location\"] = cleanData[[\"latitude\", \"longitude\"]].apply(\n lambda x: asGeoJSON(x.latitude, x.longitude), axis=1)\n\n cleanData = cleanData.drop(columns=[\"latitude\", \"longitude\"])\n\n return cleanData\n\n\ndef cleaning_money(string):\n\n division = list(string)\n\n lista = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", 
\"6\", \"7\", \"8\", \"9\"]\n\n result = []\n\n for element in division:\n\n if element in lista:\n\n result.append(element)\n\n if element == \".\":\n\n result.append(element)\n\n union = \"\".join(result)\n\n return float(union)\n\n\ndef cleaning_tech(x):\n\n tech = [\"analytics\", \"biotech\", \"cleantech\", \"ecommerce\", \"hardware\",\n \"mobile\", \"nanotech\", \"network_hosting\", \"semiconductor\", \"software\"]\n\n if x in tech:\n\n return x\n\n else:\n\n return None\n\ndef cleaning_design(x):\n\n design = [\"design\", \"web\", \"advertising\", \"fashion\", \"games_video\"]\n\n if x in design:\n\n return x\n\n else:\n\n return None\n\n\n","repo_name":"Alexvidalcor/Analysis-Places","sub_path":"src/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73264418237","text":"from player import Player\nfrom card import *\nimport string\nclass Manager:\n def __init__(self, store):\n self.store = store\n self.player = Player(self,self.store)\n self.name = \"Superclass Manager\"\n self.turns = 0\n\n\n\n def take_turn(self):\n self.turns +=1\n self.player.do_turn()\n\n def get_points(self):\n return self.player.get_points()\n\n def get_hand(self):\n return self.player.get_hand()\n\n def play(self, name):\n return self.player.play(name)\n\n def do_actions(self):\n return\n\n def get_all(self):\n return self.player.get_all()\n\n def get_left(self, name):\n return self.player.get_left(name)\n\n def card_prompt(self, card, requested_args):\n print('Base manager prompted')\n pass\n\n\nclass Human(Manager):\n\n def __init__(self, store, name=\"human\"):\n super().__init__(store)\n self.name = name\n self.player = Player(self, store)\n\n def do_actions(self):\n actionPhase = True\n print('------------------------------')\n while self.player.actions > 0 and actionPhase:\n self.print_hand()\n a = input('Enter card to play (\"end\" to stop):').title()\n if a != 'End':\n if card_dict.get(a):\n print('playing ', a)\n self.play(a)\n\n else:\n print('try again')\n else:\n actionPhase = False\n\n\n def buy_cards(self):\n buyPhase = True\n while self.player.buys > 0 and buyPhase:\n print(self.player.store_str())\n print(\"Buys: {}, Money: {} \".format(self.player.buys,self.player.money))\n a = input('Enter card to buy (\"end\" to stop):').title()\n if a != 'End':\n self.player.buy(a)\n else:\n buyPhase = False\n\n def card_prompt(self, card, requested_args):\n a = input('enter input for {}: '.format(card.name)).title()\n if type(card) is card_dict['Chapel']:\n print('asdf')\n trash_list = []\n for token in a.split(\" \"):\n trash_list.append(int(token))\n kwargs = {'hand_locs': trash_list}\n card.use_effect(**kwargs)\n return\n #store, card\n #hand_locs\n kwargs = {'card_choice':a}\n if card.use_effect(**kwargs):\n pass\n\n\n\n def print_hand(self):\n turn = self.player.turns\n act = self.player.actions\n buy = self.player.buys\n money = self.player.money\n print(\"Turn: {} || Actions: {}, Buys: {}, Money: {}\"\n .format(turn,act,buy,money))\n print(self.get_hand())\n\n\nclass DumbMoney(Manager):\n num = 1\n def __init__(self,store, name = 'DumbManager'):\n super().__init__(store)\n self.name = \"DumbManager\"+str(num)\n num +=1\n self.player = Player(self,self.store)\n\n\n def buy_cards(self):\n money = self.player.money\n if money >= 8:\n self.player.buy(\"Province\")\n elif money >= 6:\n self.player.buy(\"Gold\")\n elif money >= 3:\n card = 
self.player.buy(\"Silver\")\n\nclass DumbMoneyV2(Manager):\n\n\n def __init__(self,store, name='DumbManagerV2'):\n super().__init__(store)\n self.name = name\n self.player = Player(self,self.store)\n self.smithy= 0\n\n\n def buy_cards(self):\n money = self.player.money\n if self.smithy < 2 and money in (4,5):\n self.player.buy(\"Smithy\")\n self.smithy+=1\n if money >= 8:\n self.player.buy(\"Province\")\n elif money >= 6:\n self.player.buy(\"Gold\")\n elif money >= 3:\n card = self.player.buy(\"Silver\")\n\n def do_actions(self):\n hand = self.get_hand()\n have_smithy = any(isinstance(card,card_dict['Smithy']) for card in hand)\n if have_smithy:\n self.play(\"Smithy\")\n\n\nclass DumbMoneyV3(Manager):\n def __init__(self,store,name='DumbManagerV3'):\n super().__init__(store)\n self.name = name\n self.player = Player(self,self.store)\n self.smithy= 0\n\n def buy_cards(self):\n money = self.player.money\n if self.smithy < 2 and money in (4,5):\n self.player.buy(\"Smithy\")\n self.smithy+=1\n if money >= 8:\n self.player.buy(\"Province\")\n elif money >= 5 and self.get_left(\"Province\") < 3:\n self.player.buy(\"Duchy\")\n elif money >= 6:\n self.player.buy(\"Gold\")\n elif money >= 3:\n card = self.player.buy(\"Silver\")\n\n def do_actions(self):\n hand = self.get_hand()\n have_smithy = any(isinstance(card,card_dict['Smithy']) for card in hand)\n if have_smithy:\n self.play(\"Smithy\")\n\n\n\n\n","repo_name":"carl12/Dominion2","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"22460913490","text":"import execjs\nimport json\n\nitem={\n 'name': '凯文-杜兰特',\n 'image': 'durant.png',\n 'birthday': '1988-09-29',\n 'height': '208cm',\n 'weight': '108.9KG'\n}\n\n# 文件路径\nfile='crypto.js'\n# 获取js执行环境\nnode=execjs.get()\n# 将js代码放入执行环境中 相当于已经编译\nctx=node.compile(open(file).read())\n\n\njs=f\"getToken('{json.dumps(item,ensure_ascii=False)}')\"\nprint(js)\n# 调用js代码中的某些函数 ctx.eval(js)\nresult=ctx.eval(js)\nprint(result)","repo_name":"HandsomeLunHui/CrawlAndOj","sub_path":"crawl/runjs/pythonjs.py","file_name":"pythonjs.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"145757556","text":"\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\n\r\n#BeaufifulSoup functions\r\n\"\"\"\r\nfind(element_tag, attribute) #return first matching item\r\nfind_all(element_tag, attribute) #return list of matching items\r\n\"\"\"\r\n\r\nclass BBC:\r\n def __init__(self, url:str):\r\n article = requests.get(url)\r\n self.soup = bs(article.content, \"html.parser\")\r\n self.body = self.get_body()\r\n self.title = self.get_title()\r\n \r\n \"\"\" \r\n def get_body(self) -> list:\r\n body = self.soup.find(property=\"articleBody\")\r\n return [p.text for p in body.find_all(\"p\")]\r\n \"\"\"\r\n \r\n def get_title(self) -> str:\r\n body = self.soup.find(class_=v_class).text\r\n return [p.text for p in body.find_all(\"p\")]\r\n\r\n \r\n \r\nif __name__ == \"__main__\":\r\n url = \"https://www.bbc.com/news/world-africa-59850904\"; #Webside URL\r\n v_class = \"ssrcss-1mc1y2-ArticleWrapper e1nh2i2l6\" #Innermost html element class which encapsulates content of interest (Can also be html property)\r\n \r\n bbc = BBC(url)\r\n s = get_bbc_text(url)\r\n \r\n \r\n# file = open(\"test_scratch.txt\", \"w\")\r\n# 
file1.write(s)\r\n","repo_name":"Jocho-Smith/fat_curd","sub_path":"bbc_sentiment/testing_the_prediction/beatuy_soup_exmple.py","file_name":"beatuy_soup_exmple.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}